/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "compiler/nir/nir.h"
#include "draw/draw_context.h"
#include "util/format/u_format.h"
#include "util/format/u_format_s3tc.h"
#include "util/os_misc.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_screen.h"
#include "util/u_string.h"

#include "i915_context.h"
#include "i915_debug.h"
#include "i915_public.h"
#include "i915_reg.h"
#include "i915_resource.h"
#include "i915_screen.h"
#include "i915_winsys.h"

/*
 * Probe functions
 */

static const char *
i915_get_vendor(struct pipe_screen *screen)
{
   return "Mesa Project";
}

static const char *
i915_get_device_vendor(struct pipe_screen *screen)
{
   return "Intel";
}

static const char *
i915_get_name(struct pipe_screen *screen)
{
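   /* Note: the renderer string is formatted into a static buffer, so the
    * returned pointer stays valid but its contents are overwritten by the
    * next call.
    */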
   static char buffer[128];
   const char *chipset;

   switch (i915_screen(screen)->iws->pci_id) {
   case PCI_CHIP_I915_G:
      chipset = "915G";
      break;
   case PCI_CHIP_I915_GM:
      chipset = "915GM";
      break;
   case PCI_CHIP_I945_G:
      chipset = "945G";
      break;
   case PCI_CHIP_I945_GM:
      chipset = "945GM";
      break;
   case PCI_CHIP_I945_GME:
      chipset = "945GME";
      break;
   case PCI_CHIP_G33_G:
      chipset = "G33";
      break;
   case PCI_CHIP_Q35_G:
      chipset = "Q35";
      break;
   case PCI_CHIP_Q33_G:
      chipset = "Q33";
      break;
   case PCI_CHIP_PINEVIEW_G:
      chipset = "Pineview G";
      break;
   case PCI_CHIP_PINEVIEW_M:
      chipset = "Pineview M";
      break;
   default:
      chipset = "unknown";
      break;
   }

   snprintf(buffer, sizeof(buffer), "i915 (chipset: %s)", chipset);
   return buffer;
}

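/* Two sets of NIR compiler options: fragment shaders are compiled by the
 * i915 FS backend (through nir_to_tgsi, hence the !CAP_INTEGERS lowering),
 * while all other stages are handed to the draw module; see
 * i915_get_compiler_options() below.
 */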
static const nir_shader_compiler_options i915_compiler_options = {
   .fuse_ffma32 = true,
   .lower_bitops = true, /* required for !CAP_INTEGERS nir_to_tgsi */
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_fdiv = true,
   .lower_fdph = true,
   .lower_flrp32 = true,
   .lower_fmod = true,
   .lower_rotate = true,
   .lower_uniforms_to_ubo = true,
   .lower_vector_cmp = true,
   .use_interpolated_input_intrinsics = true,
   .force_indirect_unrolling = ~0,
};

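/* Options for the non-fragment stages, which are run on the CPU by the draw
 * module (hence the gallivm naming).
 */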
static const struct nir_shader_compiler_options gallivm_nir_options = {
   .lower_bitops = true, /* required for !CAP_INTEGERS nir_to_tgsi */
   .lower_scmp = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_fsat = true,
   .lower_bitfield_insert_to_shifts = true,
   .lower_bitfield_extract_to_shifts = true,
   .lower_fdph = true,
   .lower_ffma16 = true,
   .lower_ffma32 = true,
   .lower_ffma64 = true,
   .lower_fmod = true,
   .lower_hadd = true,
   .lower_uadd_sat = true,
   .lower_iadd_sat = true,
   .lower_ldexp = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_split = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_half_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_rotate = true,
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_mul_2x32_64 = true,
   .lower_ifind_msb = true,
   .max_unroll_iterations = 32,
   .use_interpolated_input_intrinsics = true,
   .lower_cs_local_index_from_id = true,
   .lower_uniforms_to_ubo = true,
   .lower_vector_cmp = true,
   .lower_device_index_to_zero = true,
   /* .support_16bit_alu = true, */
};

static const void *
i915_get_compiler_options(struct pipe_screen *pscreen, enum pipe_shader_ir ir,
                          enum pipe_shader_type shader)
{
   assert(ir == PIPE_SHADER_IR_NIR);
   if (shader == PIPE_SHADER_FRAGMENT)
      return &i915_compiler_options;
   else
      return &gallivm_nir_options;
}

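/* Standard NIR optimization loop, run to a fixed point.  Aggressively
 * flattening control flow (nir_opt_peephole_select with an unlimited
 * threshold, plus loop unrolling) is what gives i915_check_control_flow()
 * below a chance to pass, since the i915 fragment hardware cannot branch.
 */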
static void
i915_optimize_nir(struct nir_shader *s)
{
   bool progress;

   do {
      progress = false;

      NIR_PASS_V(s, nir_lower_vars_to_ssa);

      NIR_PASS(progress, s, nir_copy_prop);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);
      NIR_PASS(progress, s, nir_opt_remove_phis);
      NIR_PASS(progress, s, nir_opt_conditional_discard);
      NIR_PASS(progress, s, nir_opt_dce);
      NIR_PASS(progress, s, nir_opt_dead_cf);
      NIR_PASS(progress, s, nir_opt_cse);
      NIR_PASS(progress, s, nir_opt_find_array_copies);
      NIR_PASS(progress, s, nir_opt_if, true);
      NIR_PASS(progress, s, nir_opt_peephole_select, ~0 /* flatten all IFs. */,
               true, true);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);
      NIR_PASS(progress, s, nir_opt_shrink_vectors, true);
      NIR_PASS(progress, s, nir_opt_trivial_continues);
      NIR_PASS(progress, s, nir_opt_undef);
      NIR_PASS(progress, s, nir_opt_loop_unroll);

   } while (progress);

   NIR_PASS(progress, s, nir_remove_dead_variables, nir_var_function_temp,
            NULL);
}

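/* After optimization a fragment shader must be a single basic block; return
 * a human-readable error for anything that still contains an if or a loop so
 * the frontend can report the shader as unsupported.
 */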
static char *
i915_check_control_flow(nir_shader *s)
{
   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      nir_function_impl *impl = nir_shader_get_entrypoint(s);
      nir_block *first = nir_start_block(impl);
      nir_cf_node *next = nir_cf_node_next(&first->cf_node);

      if (next) {
         switch (next->type) {
         case nir_cf_node_if:
            return "if/then statements not supported by i915 fragment shaders, "
                   "should have been flattened by peephole_select.";
         case nir_cf_node_loop:
            return "looping not supported by i915 fragment shaders, all loops "
                   "must be statically unrollable.";
         default:
            return "Unknown control flow type";
         }
      }
   }

   return NULL;
}

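/* finalize_nir hook: the gallium frontend hands us each shader once, before
 * any variants are created.
 */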
static char *
i915_finalize_nir(struct pipe_screen *pscreen, void *nir)
{
   nir_shader *s = nir;

   if (s->info.stage == MESA_SHADER_FRAGMENT)
      i915_optimize_nir(s);

   /* st_program.c's parameter list optimization requires that future nir
    * variants don't reallocate the uniform storage, so we have to remove
    * uniforms that occupy storage.  But we don't want to remove samplers,
    * because they're needed for YUV variant lowering.
    */
   nir_remove_dead_derefs(s);
   nir_foreach_uniform_variable_safe (var, s) {
      if (var->data.mode == nir_var_uniform &&
          (glsl_type_get_image_count(var->type) ||
           glsl_type_get_sampler_count(var->type)))
         continue;

      exec_node_remove(&var->node);
   }
   nir_validate_shader(s, "after uniform var removal");

   nir_sweep(s);

   char *msg = i915_check_control_flow(s);
   if (msg)
      return strdup(msg);

   return NULL;
}

static int
i915_get_shader_param(struct pipe_screen *screen, enum pipe_shader_type shader,
                      enum pipe_shader_cap cap)
{
   switch (cap) {
   case PIPE_SHADER_CAP_PREFERRED_IR:
      return PIPE_SHADER_IR_NIR;
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
      return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);

   case PIPE_SHADER_CAP_INTEGERS:
      /* mesa/st requires that this cap is the same across stages, and the FS
       * can't do ints.
       */
      return 0;

   case PIPE_SHADER_CAP_INT16:
      return 0;

   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
      /* While draw could normally handle this for the VS, the NIR lowering
       * to regs can't handle our non-native-integers, so we have to lower to
       * if ladders.
       */
      return 0;

   default:
      break;
   }

   switch (shader) {
   case PIPE_SHADER_VERTEX:
      switch (cap) {
      case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
         return 0;
      case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
      case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
         return 0;
      default:
         return draw_get_shader_param(shader, cap);
      }
   case PIPE_SHADER_FRAGMENT:
      /* XXX: some of these are just shader model 2.0 values, fix this! */
      switch (cap) {
      case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
         return I915_MAX_ALU_INSN + I915_MAX_TEX_INSN;
      case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
         return I915_MAX_ALU_INSN;
      case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
         return I915_MAX_TEX_INSN;
      case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
         return 4;
      case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
         return 0;
      case PIPE_SHADER_CAP_MAX_INPUTS:
         return 10;
      case PIPE_SHADER_CAP_MAX_OUTPUTS:
         return 1;
      case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
         return 32 * sizeof(float[4]);
      case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
         return 1;
      case PIPE_SHADER_CAP_MAX_TEMPS:
         /* 16 inter-phase temps, 3 intra-phase temps.  i915c reported 16, too. */
         return 16;
      case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
      case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
         return 0;
      case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      case PIPE_SHADER_CAP_SUBROUTINES:
         return 0;
      case PIPE_SHADER_CAP_INT64_ATOMICS:
      case PIPE_SHADER_CAP_FP16:
      case PIPE_SHADER_CAP_FP16_DERIVATIVES:
      case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
      case PIPE_SHADER_CAP_INT16:
      case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
         return 0;
      case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
         return I915_TEX_UNITS;
      case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
      case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
      case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
      case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
      case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
      case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
      case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
      case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
      case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
         return 0;

      case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
         return 32;
      default:
         debug_printf("%s: Unknown cap %u.\n", __FUNCTION__, cap);
         return 0;
      }
      break;
   default:
      return 0;
   }
}

static int
i915_get_param(struct pipe_screen *screen, enum pipe_cap cap)
{
   struct i915_screen *is = i915_screen(screen);

   switch (cap) {
   /* Supported features (boolean caps). */
   case PIPE_CAP_ANISOTROPIC_FILTER:
   case PIPE_CAP_NPOT_TEXTURES:
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
   case PIPE_CAP_POINT_SPRITE:
   case PIPE_CAP_PRIMITIVE_RESTART: /* draw module */
   case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
   case PIPE_CAP_TGSI_INSTANCEID:
   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
   case PIPE_CAP_USER_VERTEX_BUFFERS:
   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
   case PIPE_CAP_TGSI_TEXCOORD:
      return 1;

   case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
   case PIPE_CAP_PCI_GROUP:
   case PIPE_CAP_PCI_BUS:
   case PIPE_CAP_PCI_DEVICE:
   case PIPE_CAP_PCI_FUNCTION:
      return 0;

   case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
   case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
      return 0;

   case PIPE_CAP_SHAREABLE_SHADERS:
      /* Can't expose shareable shaders because the draw shaders reference the
       * draw module's state, which is per-context.
       */
      return 0;

   case PIPE_CAP_MAX_GS_INVOCATIONS:
      return 32;

   case PIPE_CAP_MAX_SHADER_BUFFER_SIZE:
      return 1 << 27;

   case PIPE_CAP_MAX_VIEWPORTS:
      return 1;

   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return 64;

   case PIPE_CAP_GLSL_FEATURE_LEVEL:
   case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
      return 120;

   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 16;

   /* Features we can lie about (boolean caps). */
   case PIPE_CAP_OCCLUSION_QUERY:
      return is->debug.lie ? 1 : 0;

   /* Texturing. */
   case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
      return 1 << (I915_MAX_TEXTURE_2D_LEVELS - 1);
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
      return I915_MAX_TEXTURE_3D_LEVELS;
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
      return I915_MAX_TEXTURE_2D_LEVELS;

   /* Render targets. */
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return 1;

   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
      return 2048;

   /* Fragment coordinate conventions. */
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
      return 1;
   case PIPE_CAP_ENDIANNESS:
      return PIPE_ENDIAN_LITTLE;
   case PIPE_CAP_MAX_VARYINGS:
      return 10;

   case PIPE_CAP_NIR_IMAGES_AS_DEREF:
      return 0;

   case PIPE_CAP_VENDOR_ID:
      return 0x8086;
   case PIPE_CAP_DEVICE_ID:
      return is->iws->pci_id;
   case PIPE_CAP_ACCELERATED:
      return 1;
   case PIPE_CAP_VIDEO_MEMORY: {
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc.  That's the big cliff apps will care about.
       */
      const int gpu_mappable_megabytes =
         is->iws->aperture_size(is->iws) * 3 / 4;
      uint64_t system_memory;

      if (!os_get_total_physical_memory(&system_memory))
         return 0;

      return MIN2(gpu_mappable_megabytes, (int)(system_memory >> 20));
   }
   case PIPE_CAP_UMA:
      return 1;

   default:
      return u_pipe_screen_get_param_defaults(screen, cap);
   }
}

static float
i915_get_paramf(struct pipe_screen *screen, enum pipe_capf cap)
{
   switch (cap) {
   case PIPE_CAPF_MAX_LINE_WIDTH:
      FALLTHROUGH;
   case PIPE_CAPF_MAX_LINE_WIDTH_AA:
      return 7.5;

   case PIPE_CAPF_MAX_POINT_WIDTH:
      FALLTHROUGH;
   case PIPE_CAPF_MAX_POINT_WIDTH_AA:
      return 255.0;

   case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
      return 4.0;

   case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
      return 16.0;

   case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
      FALLTHROUGH;
   case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
      FALLTHROUGH;
   case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
      return 0.0f;

   default:
      debug_printf("%s: Unknown cap %u.\n", __FUNCTION__, cap);
      return 0;
   }
}

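/* Format support is table-driven: pick the list that matches the requested
 * bind flags (depth/stencil, render target, or sampler view) and scan it for
 * the format; vertex/index buffer formats are accepted unconditionally.
 */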
bool
i915_is_format_supported(struct pipe_screen *screen, enum pipe_format format,
                         enum pipe_texture_target target, unsigned sample_count,
                         unsigned storage_sample_count, unsigned tex_usage)
{
   static const enum pipe_format tex_supported[] = {
      PIPE_FORMAT_B8G8R8A8_UNORM, PIPE_FORMAT_B8G8R8A8_SRGB,
      PIPE_FORMAT_B8G8R8X8_UNORM, PIPE_FORMAT_R8G8B8A8_UNORM,
      PIPE_FORMAT_R8G8B8X8_UNORM, PIPE_FORMAT_B4G4R4A4_UNORM,
      PIPE_FORMAT_B5G6R5_UNORM, PIPE_FORMAT_B5G5R5A1_UNORM,
      PIPE_FORMAT_B10G10R10A2_UNORM, PIPE_FORMAT_L8_UNORM, PIPE_FORMAT_A8_UNORM,
      PIPE_FORMAT_I8_UNORM, PIPE_FORMAT_L8A8_UNORM, PIPE_FORMAT_UYVY,
      PIPE_FORMAT_YUYV,
      /* XXX why not?
      PIPE_FORMAT_Z16_UNORM, */
      PIPE_FORMAT_DXT1_RGB, PIPE_FORMAT_DXT1_SRGB, PIPE_FORMAT_DXT1_RGBA,
      PIPE_FORMAT_DXT1_SRGBA, PIPE_FORMAT_DXT3_RGBA, PIPE_FORMAT_DXT3_SRGBA,
      PIPE_FORMAT_DXT5_RGBA, PIPE_FORMAT_DXT5_SRGBA, PIPE_FORMAT_Z24X8_UNORM,
      PIPE_FORMAT_FXT1_RGB, PIPE_FORMAT_FXT1_RGBA,
      PIPE_FORMAT_Z24_UNORM_S8_UINT, PIPE_FORMAT_NONE /* list terminator */
   };
   static const enum pipe_format render_supported[] = {
      PIPE_FORMAT_B8G8R8A8_UNORM, PIPE_FORMAT_B8G8R8X8_UNORM,
      PIPE_FORMAT_R8G8B8A8_UNORM, PIPE_FORMAT_R8G8B8X8_UNORM,
      PIPE_FORMAT_B5G6R5_UNORM, PIPE_FORMAT_B5G5R5A1_UNORM,
      PIPE_FORMAT_B4G4R4A4_UNORM, PIPE_FORMAT_B10G10R10A2_UNORM,
      PIPE_FORMAT_L8_UNORM, PIPE_FORMAT_A8_UNORM,
      PIPE_FORMAT_I8_UNORM, PIPE_FORMAT_NONE /* list terminator */
   };
   static const enum pipe_format depth_supported[] = {
      /* XXX why not?
      PIPE_FORMAT_Z16_UNORM, */
      PIPE_FORMAT_Z24X8_UNORM, PIPE_FORMAT_Z24_UNORM_S8_UINT,
      PIPE_FORMAT_NONE /* list terminator */
   };
   const enum pipe_format *list;
   uint32_t i;

   if (sample_count > 1)
      return false;

   if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
      return false;

   if (tex_usage & PIPE_BIND_DEPTH_STENCIL)
      list = depth_supported;
   else if (tex_usage & PIPE_BIND_RENDER_TARGET)
      list = render_supported;
   else if (tex_usage & PIPE_BIND_SAMPLER_VIEW)
      list = tex_supported;
   else
      return true; /* PIPE_BIND_{VERTEX,INDEX}_BUFFER */

   for (i = 0; list[i] != PIPE_FORMAT_NONE; i++) {
      if (list[i] == format)
         return true;
   }

   return false;
}

/*
 * Fence functions
 */

static void
i915_fence_reference(struct pipe_screen *screen, struct pipe_fence_handle **ptr,
                     struct pipe_fence_handle *fence)
{
   struct i915_screen *is = i915_screen(screen);

   is->iws->fence_reference(is->iws, ptr, fence);
}

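/* A zero timeout means "poll": just report whether the fence has already
 * signalled.  Any other timeout falls through to the winsys' blocking wait.
 */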
static bool
i915_fence_finish(struct pipe_screen *screen, struct pipe_context *ctx,
                  struct pipe_fence_handle *fence, uint64_t timeout)
{
   struct i915_screen *is = i915_screen(screen);

   if (!timeout)
      return is->iws->fence_signalled(is->iws, fence) == 1;

   return is->iws->fence_finish(is->iws, fence) == 1;
}

/*
 * Generic functions
 */

static void
i915_flush_frontbuffer(struct pipe_screen *screen, struct pipe_context *pipe,
                       struct pipe_resource *resource, unsigned level,
                       unsigned layer, void *winsys_drawable_handle,
                       struct pipe_box *sub_box)
{
   /* XXX: Dummy right now. */
   (void)screen;
   (void)pipe;
   (void)resource;
   (void)level;
   (void)layer;
   (void)winsys_drawable_handle;
   (void)sub_box;
}

static void
i915_destroy_screen(struct pipe_screen *screen)
{
   struct i915_screen *is = i915_screen(screen);

   if (is->iws)
      is->iws->destroy(is->iws);

   FREE(is);
}

/**
 * Create a new i915_screen object
 */
struct pipe_screen *
i915_screen_create(struct i915_winsys *iws)
{
   struct i915_screen *is = CALLOC_STRUCT(i915_screen);

   if (!is)
      return NULL;

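   /* Classify the device: is_i945 is consulted elsewhere in the driver
    * (e.g. for the 945-style texture layouts in i915_resource).  Unknown
    * PCI IDs are rejected rather than guessed at.
    */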
   switch (iws->pci_id) {
   case PCI_CHIP_I915_G:
   case PCI_CHIP_I915_GM:
      is->is_i945 = false;
      break;

   case PCI_CHIP_I945_G:
   case PCI_CHIP_I945_GM:
   case PCI_CHIP_I945_GME:
   case PCI_CHIP_G33_G:
   case PCI_CHIP_Q33_G:
   case PCI_CHIP_Q35_G:
   case PCI_CHIP_PINEVIEW_G:
   case PCI_CHIP_PINEVIEW_M:
      is->is_i945 = true;
      break;

   default:
      debug_printf("%s: unknown pci id 0x%x, cannot create screen\n",
                   __FUNCTION__, iws->pci_id);
      FREE(is);
      return NULL;
   }

   is->iws = iws;

   is->base.destroy = i915_destroy_screen;
   is->base.flush_frontbuffer = i915_flush_frontbuffer;

   is->base.get_name = i915_get_name;
   is->base.get_vendor = i915_get_vendor;
   is->base.get_device_vendor = i915_get_device_vendor;
   is->base.get_param = i915_get_param;
   is->base.get_shader_param = i915_get_shader_param;
   is->base.get_paramf = i915_get_paramf;
   is->base.get_compiler_options = i915_get_compiler_options;
   is->base.finalize_nir = i915_finalize_nir;
   is->base.is_format_supported = i915_is_format_supported;

   is->base.context_create = i915_create_context;

   is->base.fence_reference = i915_fence_reference;
   is->base.fence_finish = i915_fence_finish;

   i915_init_screen_resource_functions(is);

   i915_debug_init(is);

   return &is->base;
}