/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_screen.c
 *
 * Screen related driver hooks and capability lists.
 *
 * A program may use multiple rendering contexts (iris_context), but
 * they all share a common screen (iris_screen).  Global driver state
 * can be stored in the screen; it may be accessed by multiple threads.
 */

#include <stdio.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/os_file.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/ralloc.h"
#include "util/xmlconfig.h"
#include "iris_context.h"
#include "iris_defines.h"
#include "iris_fence.h"
#include "iris_perf.h"
#include "iris_pipe.h"
#include "iris_resource.h"
#include "iris_screen.h"
#include "compiler/glsl_types.h"
#include "intel/common/intel_debug_identifier.h"
#include "intel/common/intel_gem.h"
#include "intel/common/intel_l3_config.h"
#include "intel/common/intel_uuid.h"
#include "iris_monitor.h"

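/* Dispatch to the per-generation implementation of `func` based on
 * devinfo->verx10 (hardware generation x 10).  For example,
 * genX_call(screen->devinfo, init_screen_state, screen) resolves to
 * gfx12_init_screen_state(screen) on Gfx12.0 hardware.
 */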
#define genX_call(devinfo, func, ...)             \
   switch ((devinfo)->verx10) {                   \
   case 300:                                      \
      gfx30_##func(__VA_ARGS__);                  \
      break;                                      \
   case 200:                                      \
      gfx20_##func(__VA_ARGS__);                  \
      break;                                      \
   case 125:                                      \
      gfx125_##func(__VA_ARGS__);                 \
      break;                                      \
   case 120:                                      \
      gfx12_##func(__VA_ARGS__);                  \
      break;                                      \
   case 110:                                      \
      gfx11_##func(__VA_ARGS__);                  \
      break;                                      \
   case 90:                                       \
      gfx9_##func(__VA_ARGS__);                   \
      break;                                      \
   case 80:                                       \
      gfx8_##func(__VA_ARGS__);                   \
      break;                                      \
   default:                                       \
      unreachable("Unknown hardware generation"); \
   }

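/* When iris is built without the legacy ELK compiler backend, provide stubs
 * for the Gfx8 entry points so the gfx8_* references in genX_call() above
 * still resolve; they are never expected to be reached.
 */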
#ifndef INTEL_USE_ELK
static inline void gfx8_init_screen_state(struct iris_screen *screen) { unreachable("no elk support"); }
static inline void gfx8_init_screen_gen_state(struct iris_screen *screen) { unreachable("no elk support"); }
#endif

static const char *
iris_get_vendor(struct pipe_screen *pscreen)
{
   return "Intel";
}

static const char *
iris_get_device_vendor(struct pipe_screen *pscreen)
{
   return "Intel";
}

static void
iris_get_device_uuid(struct pipe_screen *pscreen, char *uuid)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;

   intel_uuid_compute_device_id((uint8_t *)uuid, screen->devinfo, PIPE_UUID_SIZE);
}

static void
iris_get_driver_uuid(struct pipe_screen *pscreen, char *uuid)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;

   intel_uuid_compute_driver_id((uint8_t *)uuid, devinfo, PIPE_UUID_SIZE);
}

static void
iris_warn_cl()
{
   static bool warned = false;
   if (warned || INTEL_DEBUG(DEBUG_CL_QUIET))
      return;

   warned = true;
   fprintf(stderr, "WARNING: OpenCL support via iris driver is incomplete.\n"
                   "For a complete and conformant OpenCL implementation, use\n"
                   "https://github.com/intel/compute-runtime instead\n");
}

static const char *
iris_get_name(struct pipe_screen *pscreen)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;
   static char buf[128];

   snprintf(buf, sizeof(buf), "Mesa %s", devinfo->name);
   return buf;
}

static const char *
iris_get_cl_cts_version(struct pipe_screen *pscreen)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;

   /* https://www.khronos.org/conformance/adopters/conformant-products/opencl#submission_405 */
   if (devinfo->verx10 == 120)
      return "v2022-04-22-00";

   return NULL;
}

static int
iris_get_video_memory(struct iris_screen *screen)
{
   uint64_t vram = iris_bufmgr_vram_size(screen->bufmgr);
   uint64_t sram = iris_bufmgr_sram_size(screen->bufmgr);
   if (vram) {
      return vram / (1024 * 1024);
   } else if (sram) {
      return sram / (1024 * 1024);
   } else {
      /* This is the old code path, it gets the GGTT size from the kernel
       * (which should always be 4GB on Gfx8+).
       *
       * We should probably never end up here. This is just a fallback to get
       * some kind of value in case os_get_available_system_memory fails.
       */
      const struct intel_device_info *devinfo = screen->devinfo;
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc.  That's the big cliff apps will care about.
       */
      const unsigned gpu_mappable_megabytes =
         (devinfo->aperture_bytes * 3 / 4) / (1024 * 1024);

      const long system_memory_pages = sysconf(_SC_PHYS_PAGES);
      const long system_page_size = sysconf(_SC_PAGE_SIZE);

      if (system_memory_pages <= 0 || system_page_size <= 0)
         return -1;

      const uint64_t system_memory_bytes =
         (uint64_t) system_memory_pages * (uint64_t) system_page_size;

      const unsigned system_memory_megabytes =
         (unsigned) (system_memory_bytes / (1024 * 1024));

      return MIN2(system_memory_megabytes, gpu_mappable_megabytes);
   }
}

static int
iris_get_shader_param(struct pipe_screen *pscreen,
                      enum pipe_shader_type p_stage,
                      enum pipe_shader_cap param)
{
   gl_shader_stage stage = stage_from_pipe(p_stage);

   if (p_stage == PIPE_SHADER_MESH ||
       p_stage == PIPE_SHADER_TASK)
      return 0;

   /* this is probably not totally correct.. but it's a start: */
   switch (param) {
   case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
      return stage == MESA_SHADER_FRAGMENT ? 1024 : 16384;
   case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
      return stage == MESA_SHADER_FRAGMENT ? 1024 : 0;

   case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
      return UINT_MAX;

   case PIPE_SHADER_CAP_MAX_INPUTS:
      return stage == MESA_SHADER_VERTEX ? 16 : 32;
   case PIPE_SHADER_CAP_MAX_OUTPUTS:
      return 32;
   case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
      return 16 * 1024 * sizeof(float);
   case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
      return 16;
   case PIPE_SHADER_CAP_MAX_TEMPS:
      return 256; /* GL_MAX_PROGRAM_TEMPORARIES_ARB */
   case PIPE_SHADER_CAP_CONT_SUPPORTED:
      return 0;
   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      /* Lie about these to avoid st/mesa's GLSL IR lowering of indirects,
       * which we don't want.  Our compiler backend will check brw_compiler's
       * options and call nir_lower_indirect_derefs appropriately anyway.
       */
      return true;
   case PIPE_SHADER_CAP_SUBROUTINES:
      return 0;
   case PIPE_SHADER_CAP_INTEGERS:
      return 1;
   case PIPE_SHADER_CAP_INT64_ATOMICS:
   case PIPE_SHADER_CAP_FP16:
   case PIPE_SHADER_CAP_FP16_DERIVATIVES:
   case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
   case PIPE_SHADER_CAP_INT16:
   case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
      return 0;
   case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      return IRIS_MAX_SAMPLERS;
   case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
      return IRIS_MAX_TEXTURES;
   case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
      return IRIS_MAX_IMAGES;
   case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
      return IRIS_MAX_ABOS + IRIS_MAX_SSBOS;
   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
      return 0;
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
      return 1 << PIPE_SHADER_IR_NIR;
   case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
   case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
      return 0;
   default:
      unreachable("unknown shader param");
   }
}

static int
iris_get_compute_param(struct pipe_screen *pscreen,
                       enum pipe_shader_ir ir_type,
                       enum pipe_compute_cap param,
                       void *ret)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;

   const uint32_t max_invocations =
      MIN2(1024, 32 * devinfo->max_cs_workgroup_threads);

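/* RET(x) copies the compound-literal array `x` into the caller-provided
 * buffer (when ret is non-NULL) and returns sizeof(x), the number of bytes
 * reported for the requested compute cap.
 */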
#define RET(x) do {                  \
   if (ret)                          \
      memcpy(ret, x, sizeof(x));     \
   return sizeof(x);                 \
} while (0)

   switch (param) {
   case PIPE_COMPUTE_CAP_ADDRESS_BITS:
      /* This gets queried on OpenCL device init and is never queried by the
       * OpenGL state tracker.
       */
      iris_warn_cl();
      RET((uint32_t []){ 64 });

   case PIPE_COMPUTE_CAP_IR_TARGET:
      if (ret)
         strcpy(ret, "gen");
      return 4;

   case PIPE_COMPUTE_CAP_GRID_DIMENSION:
      RET((uint64_t []) { 3 });

   case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
      RET(((uint64_t []) { UINT32_MAX, UINT32_MAX, UINT32_MAX }));

   case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
      /* MaxComputeWorkGroupSize[0..2] */
      RET(((uint64_t []) {max_invocations, max_invocations, max_invocations}));

   case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
      /* MaxComputeWorkGroupInvocations */
   case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
      /* MaxComputeVariableGroupInvocations */
      RET((uint64_t []) { max_invocations });

   case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
      /* MaxComputeSharedMemorySize */
      RET((uint64_t []) { 64 * 1024 });

   case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
      RET((uint32_t []) { 1 });

   case PIPE_COMPUTE_CAP_SUBGROUP_SIZES:
      RET((uint32_t []) { 32 | 16 | 8 });

   case PIPE_COMPUTE_CAP_MAX_SUBGROUPS:
      RET((uint32_t []) { devinfo->max_cs_workgroup_threads });

   case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
   case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
      RET((uint64_t []) { 1 << 30 }); /* TODO */

   case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
      RET((uint32_t []) { 400 }); /* TODO */

   case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS: {
      RET((uint32_t []) { intel_device_info_subslice_total(devinfo) });
   }

   case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
      /* MaxComputeSharedMemorySize */
      RET((uint64_t []) { 64 * 1024 });

   case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
      /* We could probably allow more; this is the OpenCL minimum */
      RET((uint64_t []) { 1024 });

   default:
      unreachable("unknown compute param");
   }
}

static void
iris_init_screen_caps(struct iris_screen *screen)
{
   struct pipe_caps *caps = (struct pipe_caps *)&screen->base.caps;

   u_init_pipe_screen_caps(&screen->base, 1);

   const struct intel_device_info *devinfo = screen->devinfo;

   caps->npot_textures = true;
   caps->anisotropic_filter = true;
   caps->occlusion_query = true;
   caps->query_time_elapsed = true;
   caps->texture_swizzle = true;
   caps->texture_mirror_clamp_to_edge = true;
   caps->blend_equation_separate = true;
   caps->fragment_shader_texture_lod = true;
   caps->fragment_shader_derivatives = true;
   caps->primitive_restart = true;
   caps->primitive_restart_fixed_index = true;
   caps->indep_blend_enable = true;
   caps->indep_blend_func = true;
   caps->fs_coord_origin_upper_left = true;
   caps->fs_coord_pixel_center_integer = true;
   caps->depth_clip_disable = true;
   caps->vs_instanceid = true;
   caps->vertex_element_instance_divisor = true;
   caps->seamless_cube_map = true;
   caps->seamless_cube_map_per_texture = true;
   caps->conditional_render = true;
   caps->texture_barrier = true;
   caps->stream_output_pause_resume = true;
   caps->vertex_color_unclamped = true;
   caps->compute = true;
   caps->start_instance = true;
   caps->query_timestamp = true;
   caps->texture_multisample = true;
   caps->cube_map_array = true;
   caps->texture_buffer_objects = true;
   caps->query_pipeline_statistics_single = true;
   caps->texture_query_lod = true;
   caps->sample_shading = true;
   caps->force_persample_interp = true;
   caps->draw_indirect = true;
   caps->multi_draw_indirect = true;
   caps->multi_draw_indirect_params = true;
   caps->mixed_framebuffer_sizes = true;
   caps->vs_layer_viewport = true;
   caps->tes_layer_viewport = true;
   caps->fs_fine_derivative = true;
   caps->shader_pack_half_float = true;
   caps->conditional_render_inverted = true;
   caps->clip_halfz = true;
   caps->tgsi_texcoord = true;
   caps->stream_output_interleave_buffers = true;
   caps->doubles = true;
   caps->int64 = true;
   caps->sampler_view_target = true;
   caps->robust_buffer_access_behavior = true;
   caps->device_reset_status_query = true;
   caps->copy_between_compressed_and_plain_formats = true;
   caps->framebuffer_no_attachment = true;
   caps->cull_distance = true;
   caps->packed_uniforms = true;
   caps->signed_vertex_buffer_offset = true;
   caps->texture_float_linear = true;
   caps->texture_half_float_linear = true;
   caps->polygon_offset_clamp = true;
   caps->query_so_overflow = true;
   caps->query_buffer_object = true;
   caps->tgsi_tex_txf_lz = true;
   caps->texture_query_samples = true;
   caps->shader_clock = true;
   caps->shader_ballot = true;
   caps->multisample_z_resolve = true;
   caps->clear_scissored = true;
   caps->shader_group_vote = true;
   caps->vs_window_space_position = true;
   caps->texture_gather_sm5 = true;
   caps->shader_array_components = true;
   caps->glsl_tess_levels_as_inputs = true;
   caps->load_constbuf = true;
   caps->draw_parameters = true;
   caps->fs_position_is_sysval = true;
   caps->fs_face_is_integer_sysval = true;
   caps->compute_shader_derivatives = true;
   caps->invalidate_buffer = true;
   caps->surface_reinterpret_blocks = true;
   caps->texture_shadow_lod = true;
   caps->shader_samples_identical = true;
   caps->gl_spirv = true;
   caps->gl_spirv_variable_pointers = true;
   caps->demote_to_helper_invocation = true;
   caps->native_fence_fd = true;
   caps->memobj = true;
   caps->mixed_color_depth_bits = true;
   caps->fence_signal = true;
   caps->image_store_formatted = true;
   caps->legacy_math_rules = true;
   caps->alpha_to_coverage_dither_control = true;
   caps->map_unsynchronized_thread_safe = true;
   caps->has_const_bw = true;
   caps->cl_gl_sharing = true;
   caps->uma = iris_bufmgr_vram_size(screen->bufmgr) == 0;
   caps->query_memory_info = iris_bufmgr_vram_size(screen->bufmgr) != 0;
   caps->prefer_back_buffer_reuse = false;
   caps->fbfetch = IRIS_MAX_DRAW_BUFFERS;
   caps->fbfetch_coherent = devinfo->ver >= 9 && devinfo->ver < 20;
   caps->conservative_raster_inner_coverage =
   caps->post_depth_coverage =
   caps->shader_stencil_export =
   caps->depth_clip_disable_separate =
   caps->fragment_shader_interlock =
   caps->atomic_float_minmax = devinfo->ver >= 9;
   caps->depth_bounds_test = devinfo->ver >= 12;
   caps->max_dual_source_render_targets = 1;
   caps->max_render_targets = IRIS_MAX_DRAW_BUFFERS;
   caps->max_texture_2d_size = 16384;
   caps->max_texture_cube_levels = IRIS_MAX_MIPLEVELS; /* 16384x16384 */
   caps->max_texture_3d_levels = 12; /* 2048x2048 */
   caps->max_stream_output_buffers = 4;
   caps->max_texture_array_layers = 2048;
   caps->max_stream_output_separate_components =
      IRIS_MAX_SOL_BINDINGS / IRIS_MAX_SOL_BUFFERS;
   caps->max_stream_output_interleaved_components = IRIS_MAX_SOL_BINDINGS;
   caps->glsl_feature_level =
   caps->glsl_feature_level_compatibility = 460;
   /* 3DSTATE_CONSTANT_XS requires the start of UBOs to be 32B aligned */
   caps->constant_buffer_offset_alignment = 32;
   caps->min_map_buffer_alignment = IRIS_MAP_BUFFER_ALIGNMENT;
   caps->shader_buffer_offset_alignment = 4;
   caps->max_shader_buffer_size = 1 << 27;
   caps->texture_buffer_offset_alignment = 16; // XXX: u_screen says 256 is the minimum value...
   caps->linear_image_pitch_alignment = 1;
   caps->linear_image_base_address_alignment = 1;
   caps->texture_transfer_modes = PIPE_TEXTURE_TRANSFER_BLIT;
   caps->max_texel_buffer_elements = IRIS_MAX_TEXTURE_BUFFER_SIZE;
   caps->max_viewports = 16;
   caps->max_geometry_output_vertices = 256;
   caps->max_geometry_total_output_components = 1024;
   caps->max_gs_invocations = 32;
   caps->max_texture_gather_components = 4;
   caps->min_texture_gather_offset = -32;
   caps->max_texture_gather_offset = 31;
   caps->max_vertex_streams = 4;
   caps->vendor_id = 0x8086;
   caps->device_id = screen->devinfo->pci_device_id;
   caps->video_memory = iris_get_video_memory(screen);
   caps->max_shader_patch_varyings =
   caps->max_varyings = 32;
   /* We want immediate arrays to go get uploaded as nir->constant_data by
    * nir_opt_large_constants() instead.
    */
   caps->prefer_imm_arrays_as_constbuf = false;
   /* AMD_pinned_memory assumes the flexibility of using client memory
    * for any buffer (incl. vertex buffers) which rules out the prospect
    * of using snooped buffers, as using snooped buffers without
    * cognizance is likely to be detrimental to performance and require
    * extensive checking in the driver for correctness, e.g. to prevent
    * illegal snoop <-> snoop transfers.
    */
   caps->resource_from_user_memory = devinfo->has_llc;
   caps->throttle = !screen->driconf.disable_throttling;

   caps->context_priority_mask =
      PIPE_CONTEXT_PRIORITY_LOW |
      PIPE_CONTEXT_PRIORITY_MEDIUM |
      PIPE_CONTEXT_PRIORITY_HIGH;

   caps->frontend_noop = true;

   // XXX: don't hardcode 00:00:02.0 PCI here
   caps->pci_group = 0;
   caps->pci_bus = 0;
   caps->pci_device = 2;
   caps->pci_function = 0;

   caps->opencl_integer_functions =
   caps->integer_multiply_32x16 = true;

   /* Internal details of VF cache make this optimization harmful on GFX
    * version 8 and 9, because generated VERTEX_BUFFER_STATEs are cached
    * separately.
    */
   caps->allow_dynamic_vao_fastpath = devinfo->ver >= 11;

   caps->timer_resolution = DIV_ROUND_UP(1000000000ull, devinfo->timestamp_frequency);

   caps->device_protected_context =
      screen->kernel_features & KERNEL_HAS_PROTECTED_CONTEXT;

   caps->astc_void_extents_need_denorm_flush =
      devinfo->ver == 9 && !intel_device_info_is_9lp(devinfo);

   caps->min_line_width =
   caps->min_line_width_aa =
   caps->min_point_size =
   caps->min_point_size_aa = 1;

   caps->point_size_granularity =
   caps->line_width_granularity = 0.1;

   caps->max_line_width =
   caps->max_line_width_aa = 7.375f;

   caps->max_point_size =
   caps->max_point_size_aa = 255.0f;

   caps->max_texture_anisotropy = 16.0f;
   caps->max_texture_lod_bias = 15.0f;
}

static uint64_t
iris_get_timestamp(struct pipe_screen *pscreen)
{
   struct iris_screen *screen = (struct iris_screen *) pscreen;
   uint64_t result;

   if (!intel_gem_read_render_timestamp(iris_bufmgr_get_fd(screen->bufmgr),
                                        screen->devinfo->kmd_type, &result))
      return 0;

   result = intel_device_info_timebase_scale(screen->devinfo, result);

   return result;
}

void
iris_screen_destroy(struct iris_screen *screen)
{
   intel_perf_free(screen->perf_cfg);
   iris_destroy_screen_measure(screen);
   util_queue_destroy(&screen->shader_compiler_queue);
   glsl_type_singleton_decref();
   iris_bo_unreference(screen->workaround_bo);
   iris_bo_unreference(screen->breakpoint_bo);
   u_transfer_helper_destroy(screen->base.transfer_helper);
   iris_bufmgr_unref(screen->bufmgr);
   disk_cache_destroy(screen->disk_cache);
   close(screen->winsys_fd);
   ralloc_free(screen);
}

static void
iris_screen_unref(struct pipe_screen *pscreen)
{
   iris_pscreen_unref(pscreen);
}

static void
iris_query_memory_info(struct pipe_screen *pscreen,
                       struct pipe_memory_info *info)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct intel_device_info di;
   memcpy(&di, screen->devinfo, sizeof(di));

   if (!intel_device_info_update_memory_info(&di, screen->fd))
      return;

   info->total_device_memory =
      (di.mem.vram.mappable.size + di.mem.vram.unmappable.size) / 1024;
   info->avail_device_memory =
      (di.mem.vram.mappable.free + di.mem.vram.unmappable.free) / 1024;
   info->total_staging_memory = di.mem.sram.mappable.size / 1024;
   info->avail_staging_memory = di.mem.sram.mappable.free / 1024;

   /* Neither kernel gives us any way to calculate this information */
   info->device_memory_evicted = 0;
   info->nr_device_memory_evictions = 0;
}

static struct disk_cache *
iris_get_disk_shader_cache(struct pipe_screen *pscreen)
{
   struct iris_screen *screen = (struct iris_screen *) pscreen;
   return screen->disk_cache;
}

static const struct intel_l3_config *
iris_get_default_l3_config(const struct intel_device_info *devinfo,
                           bool compute)
{
   bool wants_dc_cache = true;
   bool has_slm = compute;
   const struct intel_l3_weights w =
      intel_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
   return intel_get_l3_config(devinfo, w);
}

static void
iris_detect_kernel_features(struct iris_screen *screen)
{
   const struct intel_device_info *devinfo = screen->devinfo;
   /* Kernel 5.2+ */
   if (intel_gem_supports_syncobj_wait(screen->fd))
      screen->kernel_features |= KERNEL_HAS_WAIT_FOR_SUBMIT;
   if (intel_gem_supports_protected_context(screen->fd, devinfo->kmd_type))
      screen->kernel_features |= KERNEL_HAS_PROTECTED_CONTEXT;
}

static bool
iris_init_identifier_bo(struct iris_screen *screen)
{
   void *bo_map;

   bo_map = iris_bo_map(NULL, screen->workaround_bo, MAP_READ | MAP_WRITE);
   if (!bo_map)
      return false;

   assert(iris_bo_is_real(screen->workaround_bo));

   screen->workaround_address = (struct iris_address) {
      .bo = screen->workaround_bo,
      .offset = ALIGN(
         intel_debug_write_identifiers(bo_map, 4096, "Iris"), 32),
   };

   iris_bo_unmap(screen->workaround_bo);

   return true;
}

static int
iris_screen_get_fd(struct pipe_screen *pscreen)
{
   struct iris_screen *screen = (struct iris_screen *) pscreen;

   return screen->winsys_fd;
}

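/* Record the union (bounding box) of the client-supplied damage rects,
 * clamped to the resource dimensions.  Passing zero rects simply clears
 * res->use_damage.
 */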
static void
iris_set_damage_region(struct pipe_screen *pscreen, struct pipe_resource *pres,
                       unsigned int nrects, const struct pipe_box *rects)
{
   struct iris_resource *res = (struct iris_resource *)pres;

   res->use_damage = nrects > 0;
   if (!res->use_damage)
      return;

   res->damage.x = INT32_MAX;
   res->damage.y = INT32_MAX;
   res->damage.width = 0;
   res->damage.height = 0;

   for (unsigned i = 0; i < nrects; i++) {
      res->damage.x = MIN2(res->damage.x, rects[i].x);
      res->damage.y = MIN2(res->damage.y, rects[i].y);
      res->damage.width = MAX2(res->damage.width, rects[i].width + rects[i].x);
      res->damage.height = MAX2(res->damage.height, rects[i].height + rects[i].y);

      if (unlikely(res->damage.x == 0 &&
                   res->damage.y == 0 &&
                   res->damage.width == res->base.b.width0 &&
                   res->damage.height == res->base.b.height0))
         break;
   }

   res->damage.x = MAX2(res->damage.x, 0);
   res->damage.y = MAX2(res->damage.y, 0);
   res->damage.width = MIN2(res->damage.width, res->base.b.width0);
   res->damage.height = MIN2(res->damage.height, res->base.b.height0);
}

struct pipe_screen *
iris_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct iris_screen *screen = rzalloc(NULL, struct iris_screen);
   if (!screen)
      return NULL;

   driParseConfigFiles(config->options, config->options_info, 0, "iris",
                       NULL, NULL, NULL, 0, NULL, 0);

   bool bo_reuse = false;
   int bo_reuse_mode = driQueryOptioni(config->options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      bo_reuse = true;
      break;
   }

   process_intel_debug_variable();

   screen->bufmgr = iris_bufmgr_get_for_fd(fd, bo_reuse);
   if (!screen->bufmgr)
      return NULL;

   screen->devinfo = iris_bufmgr_get_device_info(screen->bufmgr);
   p_atomic_set(&screen->refcount, 1);

   /* Here are the i915 features we need for Iris (in chronological order):
    *    - I915_PARAM_HAS_EXEC_NO_RELOC     (3.10)
    *    - I915_PARAM_HAS_EXEC_HANDLE_LUT   (3.10)
    *    - I915_PARAM_HAS_EXEC_BATCH_FIRST  (4.13)
    *    - I915_PARAM_HAS_EXEC_FENCE_ARRAY  (4.14)
    *    - I915_PARAM_HAS_CONTEXT_ISOLATION (4.16)
    *
    * Checking the last feature availability will include all previous ones.
    */
   if (!screen->devinfo->has_context_isolation) {
      debug_error("Kernel is too old (4.16+ required) or unusable for Iris.\n"
                  "Check your dmesg logs for loading failures.\n");
      return NULL;
   }

   screen->fd = iris_bufmgr_get_fd(screen->bufmgr);
   screen->winsys_fd = os_dupfd_cloexec(fd);

   screen->id = iris_bufmgr_create_screen_id(screen->bufmgr);

   screen->workaround_bo =
      iris_bo_alloc(screen->bufmgr, "workaround", 4096, 4096,
                    IRIS_MEMZONE_OTHER, BO_ALLOC_NO_SUBALLOC | BO_ALLOC_CAPTURE);
   if (!screen->workaround_bo)
      return NULL;

   screen->breakpoint_bo = iris_bo_alloc(screen->bufmgr, "breakpoint", 4, 4,
                                         IRIS_MEMZONE_OTHER, BO_ALLOC_ZEROED);
   if (!screen->breakpoint_bo)
      return NULL;

   if (!iris_init_identifier_bo(screen))
      return NULL;

   screen->driconf.dual_color_blend_by_location =
      driQueryOptionb(config->options, "dual_color_blend_by_location");
   screen->driconf.disable_throttling =
      driQueryOptionb(config->options, "disable_throttling");
   screen->driconf.always_flush_cache = INTEL_DEBUG(DEBUG_STALL) ||
      driQueryOptionb(config->options, "always_flush_cache");
   screen->driconf.sync_compile =
      driQueryOptionb(config->options, "sync_compile");
   screen->driconf.limit_trig_input_range =
      driQueryOptionb(config->options, "limit_trig_input_range");
   screen->driconf.lower_depth_range_rate =
      driQueryOptionf(config->options, "lower_depth_range_rate");
   screen->driconf.intel_enable_wa_14018912822 =
      driQueryOptionb(config->options, "intel_enable_wa_14018912822");
   screen->driconf.enable_tbimr =
      driQueryOptionb(config->options, "intel_tbimr");
   screen->driconf.generated_indirect_threshold =
      driQueryOptioni(config->options, "generated_indirect_threshold");

   screen->precompile = debug_get_bool_option("shader_precompile", true);

   isl_device_init(&screen->isl_dev, screen->devinfo);
   screen->isl_dev.dummy_aux_address = iris_bufmgr_get_dummy_aux_address(screen->bufmgr);

   screen->isl_dev.sampler_route_to_lsc =
      driQueryOptionb(config->options, "intel_sampler_route_to_lsc");

   iris_compiler_init(screen);

   screen->l3_config_3d = iris_get_default_l3_config(screen->devinfo, false);
   screen->l3_config_cs = iris_get_default_l3_config(screen->devinfo, true);

   iris_disk_cache_init(screen);

   slab_create_parent(&screen->transfer_pool,
                      sizeof(struct iris_transfer), 64);

   iris_detect_kernel_features(screen);

   struct pipe_screen *pscreen = &screen->base;

   iris_init_screen_fence_functions(pscreen);
   iris_init_screen_resource_functions(pscreen);
   iris_init_screen_measure(screen);

   pscreen->destroy = iris_screen_unref;
   pscreen->get_name = iris_get_name;
   pscreen->get_vendor = iris_get_vendor;
   pscreen->get_device_vendor = iris_get_device_vendor;
   pscreen->get_cl_cts_version = iris_get_cl_cts_version;
   pscreen->get_screen_fd = iris_screen_get_fd;
   pscreen->get_shader_param = iris_get_shader_param;
   pscreen->get_compute_param = iris_get_compute_param;
   pscreen->get_compiler_options = iris_get_compiler_options;
   pscreen->get_device_uuid = iris_get_device_uuid;
   pscreen->get_driver_uuid = iris_get_driver_uuid;
   pscreen->get_disk_shader_cache = iris_get_disk_shader_cache;
   pscreen->is_format_supported = iris_is_format_supported;
   pscreen->context_create = iris_create_context;
   pscreen->get_timestamp = iris_get_timestamp;
   pscreen->query_memory_info = iris_query_memory_info;
   pscreen->get_driver_query_group_info = iris_get_monitor_group_info;
   pscreen->get_driver_query_info = iris_get_monitor_info;
   pscreen->set_damage_region = iris_set_damage_region;
   iris_init_screen_program_functions(pscreen);

   iris_init_screen_caps(screen);

   genX_call(screen->devinfo, init_screen_state, screen);
   genX_call(screen->devinfo, init_screen_gen_state, screen);

   glsl_type_singleton_init_or_ref();

   intel_driver_ds_init();

   /* FINISHME: Big core vs little core (for CPUs that have both kinds of
    * cores) and, possibly, thread vs core should be considered here too.
    */
   unsigned compiler_threads = 1;
   const struct util_cpu_caps_t *caps = util_get_cpu_caps();
   unsigned hw_threads = caps->nr_cpus;

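   /* Scale the shader compiler thread pool with the CPU count: roughly 3/4
    * of the hardware threads on large machines, one or two fewer than the
    * thread count on smaller ones.
    */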
   if (hw_threads >= 12) {
      compiler_threads = hw_threads * 3 / 4;
   } else if (hw_threads >= 6) {
      compiler_threads = hw_threads - 2;
   } else if (hw_threads >= 2) {
      compiler_threads = hw_threads - 1;
   }

   if (!util_queue_init(&screen->shader_compiler_queue,
                        "sh", 64, compiler_threads,
                        UTIL_QUEUE_INIT_RESIZE_IF_FULL |
                        UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY,
                        NULL)) {
      iris_screen_destroy(screen);
      return NULL;
   }

   return pscreen;
}
888