1 /**************************************************************************
2 *
3 * Copyright 2008 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "compiler/nir/nir.h"
29 #include "draw/draw_context.h"
30 #include "nir/nir_to_tgsi.h"
31 #include "util/format/u_format.h"
32 #include "util/format/u_format_s3tc.h"
33 #include "util/os_misc.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "util/u_screen.h"
37 #include "util/u_string.h"
38
39 #include "i915_context.h"
40 #include "i915_debug.h"
41 #include "i915_fpc.h"
42 #include "i915_public.h"
43 #include "i915_reg.h"
44 #include "i915_resource.h"
45 #include "i915_screen.h"
46 #include "i915_winsys.h"
47
48 /*
49 * Probe functions
50 */
51
/* pipe_screen::get_vendor: driver vendor string (always the Mesa project). */
static const char *
i915_get_vendor(struct pipe_screen *screen)
{
   (void)screen; /* unused: the vendor does not depend on the screen */
   return "Mesa Project";
}
57
/* pipe_screen::get_device_vendor: hardware vendor string. */
static const char *
i915_get_device_vendor(struct pipe_screen *screen)
{
   (void)screen; /* unused: all supported devices are Intel */
   return "Intel";
}
63
64 static const char *
i915_get_name(struct pipe_screen * screen)65 i915_get_name(struct pipe_screen *screen)
66 {
67 static char buffer[128];
68 const char *chipset;
69
70 switch (i915_screen(screen)->iws->pci_id) {
71 case PCI_CHIP_I915_G:
72 chipset = "915G";
73 break;
74 case PCI_CHIP_I915_GM:
75 chipset = "915GM";
76 break;
77 case PCI_CHIP_I945_G:
78 chipset = "945G";
79 break;
80 case PCI_CHIP_I945_GM:
81 chipset = "945GM";
82 break;
83 case PCI_CHIP_I945_GME:
84 chipset = "945GME";
85 break;
86 case PCI_CHIP_G33_G:
87 chipset = "G33";
88 break;
89 case PCI_CHIP_Q35_G:
90 chipset = "Q35";
91 break;
92 case PCI_CHIP_Q33_G:
93 chipset = "Q33";
94 break;
95 case PCI_CHIP_PINEVIEW_G:
96 chipset = "Pineview G";
97 break;
98 case PCI_CHIP_PINEVIEW_M:
99 chipset = "Pineview M";
100 break;
101 default:
102 chipset = "unknown";
103 break;
104 }
105
106 snprintf(buffer, sizeof(buffer), "i915 (chipset: %s)", chipset);
107 return buffer;
108 }
109
/* NIR compiler options for i915 fragment shaders (lowered to TGSI via
 * nir_to_tgsi). The FS path reports no integer support (see
 * PIPE_SHADER_CAP_INTEGERS below), hence the bitop lowering, and all
 * indirection/looping must be unrolled before codegen.
 */
static const nir_shader_compiler_options i915_compiler_options = {
   .fdot_replicates = true,
   .fuse_ffma32 = true,
   .lower_bitops = true, /* required for !CAP_INTEGERS nir_to_tgsi */
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_fdiv = true,
   .lower_fdph = true,
   .lower_flrp32 = true,
   .lower_fmod = true,
   .lower_sincos = true,
   .lower_uniforms_to_ubo = true,
   .lower_vector_cmp = true,
   .use_interpolated_input_intrinsics = true,
   /* Indirect addressing isn't exposed for the FS (see
    * i915_get_shader_param), so force unrolling of all indirects.
    */
   .force_indirect_unrolling = nir_var_all,
   .force_indirect_unrolling_sampler = true,
   .max_unroll_iterations = 32,
   .no_integers = true,
   .has_fused_comp_and_csel = true,
};
130
/* NIR compiler options used for the non-fragment stages, which are run on
 * the CPU through the draw module (see i915_get_compiler_options).
 */
static const struct nir_shader_compiler_options gallivm_nir_options = {
   .fdot_replicates = true,
   .lower_bitops = true, /* required for !CAP_INTEGERS nir_to_tgsi */
   .lower_scmp = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_fsat = true,
   .lower_bitfield_insert = true,
   .lower_bitfield_extract = true,
   .lower_fdph = true,
   .lower_ffma16 = true,
   .lower_ffma32 = true,
   .lower_ffma64 = true,
   .lower_fmod = true,
   .lower_hadd = true,
   .lower_uadd_sat = true,
   .lower_usub_sat = true,
   .lower_iadd_sat = true,
   .lower_ldexp = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_split = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_half_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_mul_2x32_64 = true,
   .lower_ifind_msb = true,
   .max_unroll_iterations = 32,
   .use_interpolated_input_intrinsics = true,
   .lower_cs_local_index_to_id = true,
   .lower_uniforms_to_ubo = true,
   .lower_vector_cmp = true,
   .lower_device_index_to_zero = true,
   /* .support_16bit_alu = true, */
};
175
176 static const void *
i915_get_compiler_options(struct pipe_screen * pscreen,enum pipe_shader_ir ir,enum pipe_shader_type shader)177 i915_get_compiler_options(struct pipe_screen *pscreen, enum pipe_shader_ir ir,
178 enum pipe_shader_type shader)
179 {
180 assert(ir == PIPE_SHADER_IR_NIR);
181 if (shader == PIPE_SHADER_FRAGMENT)
182 return &i915_compiler_options;
183 else
184 return &gallivm_nir_options;
185 }
186
/* Optimization loop for i915 fragment shaders: run the NIR optimization
 * passes to a fixed point, then group texture loads. peephole_select with a
 * ~0 threshold and loop unrolling are essential here — the i915 FS cannot
 * execute control flow (see i915_check_control_flow).
 */
static void
i915_optimize_nir(struct nir_shader *s)
{
   bool progress;

   do {
      progress = false;

      NIR_PASS_V(s, nir_lower_vars_to_ssa);

      NIR_PASS(progress, s, nir_copy_prop);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);
      NIR_PASS(progress, s, nir_opt_remove_phis);
      NIR_PASS(progress, s, nir_opt_conditional_discard);
      NIR_PASS(progress, s, nir_opt_dce);
      NIR_PASS(progress, s, nir_opt_dead_cf);
      NIR_PASS(progress, s, nir_opt_cse);
      NIR_PASS(progress, s, nir_opt_find_array_copies);
      NIR_PASS(progress, s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
      NIR_PASS(progress, s, nir_opt_peephole_select, ~0 /* flatten all IFs. */,
               true, true);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);
      NIR_PASS(progress, s, nir_opt_shrink_stores, true);
      NIR_PASS(progress, s, nir_opt_shrink_vectors);
      NIR_PASS(progress, s, nir_opt_loop);
      NIR_PASS(progress, s, nir_opt_undef);
      NIR_PASS(progress, s, nir_opt_loop_unroll);

   } while (progress);

   NIR_PASS(progress, s, nir_remove_dead_variables, nir_var_function_temp,
            NULL);

   /* Group texture loads together to try to avoid hitting the
    * texture indirection phase limit.
    */
   NIR_PASS_V(s, nir_group_loads, nir_group_all, ~0);
}
227
228 static char *
i915_check_control_flow(nir_shader * s)229 i915_check_control_flow(nir_shader *s)
230 {
231 if (s->info.stage == MESA_SHADER_FRAGMENT) {
232 nir_function_impl *impl = nir_shader_get_entrypoint(s);
233 nir_block *first = nir_start_block(impl);
234 nir_cf_node *next = nir_cf_node_next(&first->cf_node);
235
236 if (next) {
237 switch (next->type) {
238 case nir_cf_node_if:
239 return "if/then statements not supported by i915 fragment shaders, "
240 "should have been flattened by peephole_select.";
241 case nir_cf_node_loop:
242 return "looping not supported i915 fragment shaders, all loops "
243 "must be statically unrollable.";
244 default:
245 return "Unknown control flow type";
246 }
247 }
248 }
249
250 return NULL;
251 }
252
/* pipe_screen::finalize_nir: final NIR processing before the state tracker
 * caches the shader. Returns NULL on success or a malloc'ed error string
 * whose ownership passes to the caller.
 */
static char *
i915_finalize_nir(struct pipe_screen *pscreen, void *nir)
{
   nir_shader *s = nir;

   if (s->info.stage == MESA_SHADER_FRAGMENT)
      i915_optimize_nir(s);

   /* st_program.c's parameter list optimization requires that future nir
    * variants don't reallocate the uniform storage, so we have to remove
    * uniforms that occupy storage. But we don't want to remove samplers,
    * because they're needed for YUV variant lowering.
    */
   nir_remove_dead_derefs(s);
   nir_foreach_uniform_variable_safe (var, s) {
      if (var->data.mode == nir_var_uniform &&
          (glsl_type_get_image_count(var->type) ||
           glsl_type_get_sampler_count(var->type)))
         continue;

      exec_node_remove(&var->node);
   }
   nir_validate_shader(s, "after uniform var removal");

   nir_sweep(s);

   /* FS control flow must be fully flattened/unrolled at this point;
    * reject the shader (with optional debug dump) if it isn't.
    */
   char *msg = i915_check_control_flow(s);
   if (msg) {
      if (I915_DBG_ON(DBG_FS) && (!s->info.internal || NIR_DEBUG(PRINT_INTERNAL))) {
         mesa_logi("failing shader:");
         nir_log_shaderi(s);
      }
      /* strdup because the caller takes ownership of the message. */
      return strdup(msg);
   }

   if (s->info.stage == MESA_SHADER_FRAGMENT)
      return i915_test_fragment_shader_compile(pscreen, s);
   else
      return NULL;
}
293
/* pipe_screen::get_shader_param: report per-stage shader limits and
 * features. The VS largely defers to the draw module (it runs on the CPU);
 * the FS limits reflect the i915 hardware.
 */
static int
i915_get_shader_param(struct pipe_screen *screen, enum pipe_shader_type shader,
                      enum pipe_shader_cap cap)
{
   /* Caps that are identical for all stages. */
   switch (cap) {
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
      return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);

   case PIPE_SHADER_CAP_INTEGERS:
      /* mesa/st requires that this cap is the same across stages, and the FS
       * can't do ints.
       */
      return 0;

   /* i915 can't do these, and even if gallivm NIR can we call nir_to_tgsi
    * manually and TGSI can't.
    */
   case PIPE_SHADER_CAP_INT16:
   case PIPE_SHADER_CAP_FP16:
   case PIPE_SHADER_CAP_FP16_DERIVATIVES:
   case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
      return 0;

   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
      /* While draw could normally handle this for the VS, the NIR lowering
       * to regs can't handle our non-native-integers, so we have to lower to
       * if ladders.
       */
      return 0;

   default:
      break;
   }

   /* Stage-specific caps. */
   switch (shader) {
   case PIPE_SHADER_VERTEX:
      switch (cap) {
      case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
         return 0;
      case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
      case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
         return 0;
      default:
         /* The VS runs on the CPU via the draw module, so use its limits. */
         return draw_get_shader_param(shader, cap);
      }
   case PIPE_SHADER_FRAGMENT:
      /* XXX: some of these are just shader model 2.0 values, fix this! */
      switch (cap) {
      case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
         return I915_MAX_ALU_INSN + I915_MAX_TEX_INSN;
      case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
         return I915_MAX_ALU_INSN;
      case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
         return I915_MAX_TEX_INSN;
      case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
         return 4;
      case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
         /* No control flow at all in the FS (see i915_check_control_flow). */
         return 0;
      case PIPE_SHADER_CAP_MAX_INPUTS:
         return 10;
      case PIPE_SHADER_CAP_MAX_OUTPUTS:
         return 1;
      case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
         return 32 * sizeof(float[4]);
      case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
         return 1;
      case PIPE_SHADER_CAP_MAX_TEMPS:
         /* 16 inter-phase temps, 3 intra-phase temps. i915c reported 16. too. */
         return 16;
      case PIPE_SHADER_CAP_CONT_SUPPORTED:
      case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
         return 0;
      case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      case PIPE_SHADER_CAP_SUBROUTINES:
         return 0;
      case PIPE_SHADER_CAP_INT64_ATOMICS:
      case PIPE_SHADER_CAP_INT16:
      case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
         return 0;
      case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
         return I915_TEX_UNITS;
      case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
      case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
      case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
         return 0;

      default:
         debug_printf("%s: Unknown cap %u.\n", __func__, cap);
         return 0;
      }
      break;
   default:
      /* No geometry/compute/etc. stages on this hardware. */
      return 0;
   }
}
396
/* pipe_screen::get_param: report screen-wide capabilities. Anything not
 * listed falls through to the gallium defaults.
 */
static int
i915_get_param(struct pipe_screen *screen, enum pipe_cap cap)
{
   struct i915_screen *is = i915_screen(screen);

   switch (cap) {
   /* Supported features (boolean caps). */
   case PIPE_CAP_ANISOTROPIC_FILTER:
   case PIPE_CAP_NPOT_TEXTURES:
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
   case PIPE_CAP_PRIMITIVE_RESTART: /* draw module */
   case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
   case PIPE_CAP_VS_INSTANCEID:
   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
   case PIPE_CAP_USER_VERTEX_BUFFERS:
   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
   case PIPE_CAP_TGSI_TEXCOORD:
      return 1;

   case PIPE_CAP_TEXTURE_TRANSFER_MODES:
   case PIPE_CAP_PCI_GROUP:
   case PIPE_CAP_PCI_BUS:
   case PIPE_CAP_PCI_DEVICE:
   case PIPE_CAP_PCI_FUNCTION:
      return 0;

   case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
      return 0;

   case PIPE_CAP_SHAREABLE_SHADERS:
      /* Can't expose shareable shaders because the draw shaders reference the
       * draw module's state, which is per-context.
       */
      return 0;

   case PIPE_CAP_MAX_GS_INVOCATIONS:
      return 32;

   case PIPE_CAP_MAX_SHADER_BUFFER_SIZE_UINT:
      return 1 << 27;

   case PIPE_CAP_MAX_VIEWPORTS:
      return 1;

   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return 64;

   case PIPE_CAP_GLSL_FEATURE_LEVEL:
   case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
      return 120;

   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 16;

   /* Texturing. */
   case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
      return 1 << (I915_MAX_TEXTURE_2D_LEVELS - 1);
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
      return I915_MAX_TEXTURE_3D_LEVELS;
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
      return 1 << (I915_MAX_TEXTURE_2D_LEVELS - 1);

   /* Render targets. */
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return 1;

   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
      return 2048;

   /* Fragment coordinate conventions. */
   case PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
      return 1;
   case PIPE_CAP_ENDIANNESS:
      return PIPE_ENDIAN_LITTLE;
   case PIPE_CAP_MAX_VARYINGS:
      return 10;

   case PIPE_CAP_NIR_IMAGES_AS_DEREF:
      return 0;

   case PIPE_CAP_VENDOR_ID:
      /* Intel's PCI vendor id. */
      return 0x8086;
   case PIPE_CAP_DEVICE_ID:
      return is->iws->pci_id;
   case PIPE_CAP_ACCELERATED:
      return 1;
   case PIPE_CAP_VIDEO_MEMORY: {
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc. That's the big cliff apps will care about.
       */
      const int gpu_mappable_megabytes =
         is->iws->aperture_size(is->iws) * 3 / 4;
      uint64_t system_memory;

      if (!os_get_total_physical_memory(&system_memory))
         return 0;

      /* Report the smaller of mappable aperture and physical RAM (in MB). */
      return MIN2(gpu_mappable_megabytes, (int)(system_memory >> 20));
   }
   case PIPE_CAP_UMA:
      return 1;

   default:
      return u_pipe_screen_get_param_defaults(screen, cap);
   }
}
507
508 static float
i915_get_paramf(struct pipe_screen * screen,enum pipe_capf cap)509 i915_get_paramf(struct pipe_screen *screen, enum pipe_capf cap)
510 {
511 switch (cap) {
512 case PIPE_CAPF_MIN_LINE_WIDTH:
513 case PIPE_CAPF_MIN_LINE_WIDTH_AA:
514 case PIPE_CAPF_MIN_POINT_SIZE:
515 case PIPE_CAPF_MIN_POINT_SIZE_AA:
516 return 1;
517
518 case PIPE_CAPF_POINT_SIZE_GRANULARITY:
519 case PIPE_CAPF_LINE_WIDTH_GRANULARITY:
520 return 0.1;
521
522 case PIPE_CAPF_MAX_LINE_WIDTH:
523 FALLTHROUGH;
524 case PIPE_CAPF_MAX_LINE_WIDTH_AA:
525 return 7.5;
526
527 case PIPE_CAPF_MAX_POINT_SIZE:
528 FALLTHROUGH;
529 case PIPE_CAPF_MAX_POINT_SIZE_AA:
530 return 255.0;
531
532 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
533 return 4.0;
534
535 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
536 return 16.0;
537
538 case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
539 FALLTHROUGH;
540 case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
541 FALLTHROUGH;
542 case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
543 return 0.0f;
544
545 default:
546 debug_printf("%s: Unknown cap %u.\n", __func__, cap);
547 return 0;
548 }
549 }
550
551 bool
i915_is_format_supported(struct pipe_screen * screen,enum pipe_format format,enum pipe_texture_target target,unsigned sample_count,unsigned storage_sample_count,unsigned tex_usage)552 i915_is_format_supported(struct pipe_screen *screen, enum pipe_format format,
553 enum pipe_texture_target target, unsigned sample_count,
554 unsigned storage_sample_count, unsigned tex_usage)
555 {
556 static const enum pipe_format tex_supported[] = {
557 PIPE_FORMAT_B8G8R8A8_UNORM, PIPE_FORMAT_B8G8R8A8_SRGB,
558 PIPE_FORMAT_B8G8R8X8_UNORM, PIPE_FORMAT_R8G8B8A8_UNORM,
559 PIPE_FORMAT_R8G8B8X8_UNORM, PIPE_FORMAT_B4G4R4A4_UNORM,
560 PIPE_FORMAT_B5G6R5_UNORM, PIPE_FORMAT_B5G5R5A1_UNORM,
561 PIPE_FORMAT_B10G10R10A2_UNORM, PIPE_FORMAT_L8_UNORM, PIPE_FORMAT_A8_UNORM,
562 PIPE_FORMAT_I8_UNORM, PIPE_FORMAT_L8A8_UNORM, PIPE_FORMAT_UYVY,
563 PIPE_FORMAT_YUYV,
564 /* XXX why not?
565 PIPE_FORMAT_Z16_UNORM, */
566 PIPE_FORMAT_DXT1_RGB, PIPE_FORMAT_DXT1_SRGB, PIPE_FORMAT_DXT1_RGBA,
567 PIPE_FORMAT_DXT1_SRGBA, PIPE_FORMAT_DXT3_RGBA, PIPE_FORMAT_DXT3_SRGBA,
568 PIPE_FORMAT_DXT5_RGBA, PIPE_FORMAT_DXT5_SRGBA, PIPE_FORMAT_Z24X8_UNORM,
569 PIPE_FORMAT_FXT1_RGB, PIPE_FORMAT_FXT1_RGBA,
570 PIPE_FORMAT_Z24_UNORM_S8_UINT, PIPE_FORMAT_NONE /* list terminator */
571 };
572 static const enum pipe_format render_supported[] = {
573 PIPE_FORMAT_B8G8R8A8_UNORM, PIPE_FORMAT_B8G8R8X8_UNORM,
574 PIPE_FORMAT_R8G8B8A8_UNORM, PIPE_FORMAT_R8G8B8X8_UNORM,
575 PIPE_FORMAT_B5G6R5_UNORM, PIPE_FORMAT_B5G5R5A1_UNORM,
576 PIPE_FORMAT_B4G4R4A4_UNORM, PIPE_FORMAT_B10G10R10A2_UNORM,
577 PIPE_FORMAT_L8_UNORM, PIPE_FORMAT_A8_UNORM,
578 PIPE_FORMAT_I8_UNORM, PIPE_FORMAT_NONE /* list terminator */
579 };
580 static const enum pipe_format depth_supported[] = {
581 /* XXX why not?
582 PIPE_FORMAT_Z16_UNORM, */
583 PIPE_FORMAT_Z24X8_UNORM, PIPE_FORMAT_Z24_UNORM_S8_UINT,
584 PIPE_FORMAT_NONE /* list terminator */
585 };
586 const enum pipe_format *list;
587 uint32_t i;
588
589 if (sample_count > 1)
590 return false;
591
592 if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
593 return false;
594
595 if (tex_usage & PIPE_BIND_DEPTH_STENCIL)
596 list = depth_supported;
597 else if (tex_usage & PIPE_BIND_RENDER_TARGET)
598 list = render_supported;
599 else if (tex_usage & PIPE_BIND_SAMPLER_VIEW)
600 list = tex_supported;
601 else
602 return true; /* PIPE_BIND_{VERTEX,INDEX}_BUFFER */
603
604 for (i = 0; list[i] != PIPE_FORMAT_NONE; i++) {
605 if (list[i] == format)
606 return true;
607 }
608
609 return false;
610 }
611
612 /*
613 * Fence functions
614 */
615
616 static void
i915_fence_reference(struct pipe_screen * screen,struct pipe_fence_handle ** ptr,struct pipe_fence_handle * fence)617 i915_fence_reference(struct pipe_screen *screen, struct pipe_fence_handle **ptr,
618 struct pipe_fence_handle *fence)
619 {
620 struct i915_screen *is = i915_screen(screen);
621
622 is->iws->fence_reference(is->iws, ptr, fence);
623 }
624
625 static bool
i915_fence_finish(struct pipe_screen * screen,struct pipe_context * ctx,struct pipe_fence_handle * fence,uint64_t timeout)626 i915_fence_finish(struct pipe_screen *screen, struct pipe_context *ctx,
627 struct pipe_fence_handle *fence, uint64_t timeout)
628 {
629 struct i915_screen *is = i915_screen(screen);
630
631 if (!timeout)
632 return is->iws->fence_signalled(is->iws, fence) == 1;
633
634 return is->iws->fence_finish(is->iws, fence) == 1;
635 }
636
637 /*
638 * Generic functions
639 */
640
641 static void
i915_destroy_screen(struct pipe_screen * screen)642 i915_destroy_screen(struct pipe_screen *screen)
643 {
644 struct i915_screen *is = i915_screen(screen);
645
646 if (is->iws)
647 is->iws->destroy(is->iws);
648
649 FREE(is);
650 }
651
652 static int
i915_screen_get_fd(struct pipe_screen * screen)653 i915_screen_get_fd(struct pipe_screen *screen)
654 {
655 struct i915_screen *is = i915_screen(screen);
656
657 return is->iws->get_fd(is->iws);
658 }
659
660 /**
661 * Create a new i915_screen object
662 */
663 struct pipe_screen *
i915_screen_create(struct i915_winsys * iws)664 i915_screen_create(struct i915_winsys *iws)
665 {
666 struct i915_screen *is = CALLOC_STRUCT(i915_screen);
667
668 if (!is)
669 return NULL;
670
671 switch (iws->pci_id) {
672 case PCI_CHIP_I915_G:
673 case PCI_CHIP_I915_GM:
674 is->is_i945 = false;
675 break;
676
677 case PCI_CHIP_I945_G:
678 case PCI_CHIP_I945_GM:
679 case PCI_CHIP_I945_GME:
680 case PCI_CHIP_G33_G:
681 case PCI_CHIP_Q33_G:
682 case PCI_CHIP_Q35_G:
683 case PCI_CHIP_PINEVIEW_G:
684 case PCI_CHIP_PINEVIEW_M:
685 is->is_i945 = true;
686 break;
687
688 default:
689 debug_printf("%s: unknown pci id 0x%x, cannot create screen\n", __func__,
690 iws->pci_id);
691 FREE(is);
692 return NULL;
693 }
694
695 is->iws = iws;
696
697 is->base.destroy = i915_destroy_screen;
698
699 is->base.get_name = i915_get_name;
700 is->base.get_vendor = i915_get_vendor;
701 is->base.get_device_vendor = i915_get_device_vendor;
702 is->base.get_screen_fd = i915_screen_get_fd;
703 is->base.get_param = i915_get_param;
704 is->base.get_shader_param = i915_get_shader_param;
705 is->base.get_paramf = i915_get_paramf;
706 is->base.get_compiler_options = i915_get_compiler_options;
707 is->base.finalize_nir = i915_finalize_nir;
708 is->base.is_format_supported = i915_is_format_supported;
709
710 is->base.context_create = i915_create_context;
711
712 is->base.fence_reference = i915_fence_reference;
713 is->base.fence_finish = i915_fence_finish;
714
715 i915_init_screen_resource_functions(is);
716
717 i915_debug_init(is);
718
719 return &is->base;
720 }
721