/*
 * Copyright © 2012 Rob Clark <robclark@freedesktop.org>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

#include "util/format/u_format.h"
#include "util/format/u_format_s3tc.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_screen.h"
#include "util/u_string.h"
#include "util/xmlconfig.h"

#include "util/os_time.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "drm-uapi/drm_fourcc.h"
#include <sys/sysinfo.h>

#include "freedreno_fence.h"
#include "freedreno_perfetto.h"
#include "freedreno_query.h"
#include "freedreno_resource.h"
#include "freedreno_screen.h"
#include "freedreno_util.h"

#include "a2xx/fd2_screen.h"
#include "a3xx/fd3_screen.h"
#include "a4xx/fd4_screen.h"
#include "a5xx/fd5_screen.h"
#include "a6xx/fd6_screen.h"

/* for fd_get_driver/device_uuid() */
#include "common/freedreno_uuid.h"

#include "a2xx/ir2.h"
#include "ir3/ir3_descriptor.h"
#include "ir3/ir3_gallium.h"
#include "ir3/ir3_nir.h"

/* clang-format off */
static const struct debug_named_value fd_debug_options[] = {
   {"msgs",      FD_DBG_MSGS,     "Print debug messages"},
   {"disasm",    FD_DBG_DISASM,   "Dump TGSI and adreno shader disassembly (a2xx only, see IR3_SHADER_DEBUG)"},
   {"dclear",    FD_DBG_DCLEAR,   "Mark all state dirty after clear"},
   {"ddraw",     FD_DBG_DDRAW,    "Mark all state dirty after draw"},
   {"noscis",    FD_DBG_NOSCIS,   "Disable scissor optimization"},
   {"direct",    FD_DBG_DIRECT,   "Force inline (SS_DIRECT) state loads"},
   {"gmem",      FD_DBG_GMEM,     "Use gmem rendering when it is permitted"},
   {"perf",      FD_DBG_PERF,     "Enable performance warnings"},
   {"nobin",     FD_DBG_NOBIN,    "Disable hw binning"},
   {"sysmem",    FD_DBG_SYSMEM,   "Use sysmem only rendering (no tiling)"},
   {"serialc",   FD_DBG_SERIALC,  "Disable asynchronous shader compile"},
   {"shaderdb",  FD_DBG_SHADERDB, "Enable shaderdb output"},
   {"nolrzfc",   FD_DBG_NOLRZFC,  "Disable LRZ fast-clear"},
   {"flush",     FD_DBG_FLUSH,    "Force flush after every draw"},
   {"inorder",   FD_DBG_INORDER,  "Disable reordering for draws/blits"},
   {"bstat",     FD_DBG_BSTAT,    "Print batch stats at context destroy"},
   {"nogrow",    FD_DBG_NOGROW,   "Disable \"growable\" cmdstream buffers, even if kernel supports it"},
   {"lrz",       FD_DBG_LRZ,      "Enable experimental LRZ support (a5xx)"},
   {"noindirect",FD_DBG_NOINDR,   "Disable hw indirect draws (emulate on CPU)"},
   {"noblit",    FD_DBG_NOBLIT,   "Disable blitter (fallback to generic blit path)"},
   {"hiprio",    FD_DBG_HIPRIO,   "Force high-priority context"},
   {"ttile",     FD_DBG_TTILE,    "Enable texture tiling (a2xx/a3xx/a5xx)"},
   {"perfcntrs", FD_DBG_PERFC,    "Expose performance counters"},
   {"noubwc",    FD_DBG_NOUBWC,   "Disable UBWC for all internal buffers"},
   {"nolrz",     FD_DBG_NOLRZ,    "Disable LRZ (a6xx)"},
   {"notile",    FD_DBG_NOTILE,   "Disable tiling for all internal buffers"},
   {"layout",    FD_DBG_LAYOUT,   "Dump resource layouts"},
   {"nofp16",    FD_DBG_NOFP16,   "Disable mediump precision lowering"},
   {"nohw",      FD_DBG_NOHW,     "Disable submitting commands to the HW"},
   {"nosbin",    FD_DBG_NOSBIN,   "Execute GMEM bins in raster order instead of 'S' pattern"},
   {"stomp",     FD_DBG_STOMP,    "Enable register stomper"},
   DEBUG_NAMED_VALUE_END
};
/* clang-format on */

DEBUG_GET_ONCE_FLAGS_OPTION(fd_mesa_debug, "FD_MESA_DEBUG", fd_debug_options, 0)

int fd_mesa_debug = 0;
bool fd_binning_enabled = true;

static const char *
fd_screen_get_name(struct pipe_screen *pscreen)
{
   return fd_dev_name(fd_screen(pscreen)->dev_id);
}

static const char *
fd_screen_get_vendor(struct pipe_screen *pscreen)
{
   return "freedreno";
}

static const char *
fd_screen_get_device_vendor(struct pipe_screen *pscreen)
{
   return "Qualcomm";
}

static void
fd_get_sample_pixel_grid(struct pipe_screen *pscreen, unsigned sample_count,
                         unsigned *out_width, unsigned *out_height)
{
   *out_width = 1;
   *out_height = 1;
}

static uint64_t
fd_screen_get_timestamp(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);

   if (screen->has_timestamp) {
      uint64_t n;
      fd_pipe_get_param(screen->pipe, FD_TIMESTAMP, &n);
      return ticks_to_ns(n);
   } else {
      int64_t cpu_time = os_time_get_nano();
      return cpu_time + screen->cpu_gpu_time_delta;
   }
}

static void
fd_screen_destroy(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);

   if (screen->aux_ctx)
      screen->aux_ctx->destroy(screen->aux_ctx);

   if (screen->tess_bo)
      fd_bo_del(screen->tess_bo);

   if (screen->pipe)
      fd_pipe_del(screen->pipe);

   if (screen->dev) {
      fd_device_purge(screen->dev);
      fd_device_del(screen->dev);
   }

   if (screen->ro)
      screen->ro->destroy(screen->ro);

   fd_bc_fini(&screen->batch_cache);
   fd_gmem_screen_fini(pscreen);

   slab_destroy_parent(&screen->transfer_pool);

   simple_mtx_destroy(&screen->lock);

   util_idalloc_mt_fini(&screen->buffer_ids);

   u_transfer_helper_destroy(pscreen->transfer_helper);

   if (screen->compiler)
      ir3_screen_fini(pscreen);

   free(screen->perfcntr_queries);
   free(screen);
}

static uint64_t
get_memory_size(struct fd_screen *screen)
{
   uint64_t system_memory;

   if (!os_get_total_physical_memory(&system_memory))
      return 0;
   if (fd_device_version(screen->dev) >= FD_VERSION_VA_SIZE) {
      uint64_t va_size;
      if (!fd_pipe_get_param(screen->pipe, FD_VA_SIZE, &va_size)) {
         system_memory = MIN2(system_memory, va_size);
      }
   }

   return system_memory;
}
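
/* Illustrative example (editorial note, numbers are hypothetical): on a
 * device with 8 GiB of RAM but only a 4 GiB GPU VA space, this reports
 * 4 GiB, since the GPU could never map more than its VA size anyway.
 */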

static void
fd_query_memory_info(struct pipe_screen *pscreen,
                     struct pipe_memory_info *info)
{
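   /* pipe_memory_info reports sizes in KiB (as other gallium drivers fill
    * it in), hence the >> 10 conversion from bytes:
    */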
   unsigned mem = get_memory_size(fd_screen(pscreen)) >> 10;

   memset(info, 0, sizeof(*info));

   info->total_device_memory = mem;
   info->avail_device_memory = mem;
}

static int
fd_screen_get_shader_param(struct pipe_screen *pscreen,
                           enum pipe_shader_type shader,
                           enum pipe_shader_cap param)
{
   struct fd_screen *screen = fd_screen(pscreen);

   switch (shader) {
   case PIPE_SHADER_FRAGMENT:
   case PIPE_SHADER_VERTEX:
      break;
   case PIPE_SHADER_TESS_CTRL:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_GEOMETRY:
      if (is_a6xx(screen))
         break;
      return 0;
   case PIPE_SHADER_COMPUTE:
      if (has_compute(screen))
         break;
      return 0;
   case PIPE_SHADER_TASK:
   case PIPE_SHADER_MESH:
      return 0;
   default:
      mesa_loge("unknown shader type %d", shader);
      return 0;
   }

   /* this is probably not totally correct.. but it's a start: */
   switch (param) {
   case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
      return 16384;
   case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
      return 8; /* XXX */
   case PIPE_SHADER_CAP_MAX_INPUTS:
      if (shader == PIPE_SHADER_GEOMETRY && is_a6xx(screen))
         return 16;
      return is_a6xx(screen) ?
         (screen->info->a6xx.vs_max_inputs_count) : 16;
   case PIPE_SHADER_CAP_MAX_OUTPUTS:
      return is_a6xx(screen) ? 32 : 16;
   case PIPE_SHADER_CAP_MAX_TEMPS:
      return 64; /* Max native temporaries. */
   case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
      /* NOTE: the actual limit for a3xx seems to be 512, but split
       * between VS and FS.  Use the lower limit of 256 to avoid
       * getting into impossible situations:
       */
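      /* (For the ir3 gens below this works out to 4096 vec4s, i.e.
       * 4096 * 16 bytes = 64 KiB; the a2xx fallback of 64 vec4s is 1 KiB.)
       */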
      return ((is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) ||
               is_a6xx(screen))
                 ? 4096
                 : 64) *
             sizeof(float[4]);
   case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
      return is_ir3(screen) ? 16 : 1;
   case PIPE_SHADER_CAP_CONT_SUPPORTED:
      return 1;
   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      /* a2xx compiler doesn't handle indirect: */
      return is_ir3(screen) ? 1 : 0;
   case PIPE_SHADER_CAP_SUBROUTINES:
   case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
      return 0;
   case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
      return 1;
   case PIPE_SHADER_CAP_INTEGERS:
      return is_ir3(screen) ? 1 : 0;
   case PIPE_SHADER_CAP_INT64_ATOMICS:
   case PIPE_SHADER_CAP_FP16_DERIVATIVES:
   case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
   case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
      return 0;
   case PIPE_SHADER_CAP_INT16:
   case PIPE_SHADER_CAP_FP16:
      return (
         (is_a5xx(screen) || is_a6xx(screen)) &&
         (shader == PIPE_SHADER_COMPUTE || shader == PIPE_SHADER_FRAGMENT) &&
         !FD_DBG(NOFP16));
   case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
   case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
      return 16;
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
      return (1 << PIPE_SHADER_IR_NIR) |
             /* tgsi_to_nir doesn't support all stages: */
             COND((shader == PIPE_SHADER_VERTEX) ||
                  (shader == PIPE_SHADER_FRAGMENT) ||
                  (shader == PIPE_SHADER_COMPUTE),
                  (1 << PIPE_SHADER_IR_TGSI));
   case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
   case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
      if (is_a6xx(screen)) {
         if (param == PIPE_SHADER_CAP_MAX_SHADER_BUFFERS) {
            return IR3_BINDLESS_SSBO_COUNT;
         } else {
            return IR3_BINDLESS_IMAGE_COUNT;
         }
      } else if (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen)) {
         /* a5xx (and a4xx for that matter) has one state-block
          * for compute-shader SSBO's and another that is shared
          * by VS/HS/DS/GS/FS..  so to simplify things for now
          * just advertise SSBOs for FS and CS.  We could possibly
          * do what blob does, and partition the space for
          * VS/HS/DS/GS/FS.  The blob advertises:
          *
          *   GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS: 4
          *   GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS: 4
          *   GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS: 4
          *   GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS: 4
          *   GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS: 4
          *   GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS: 24
          *   GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS: 24
          *
          * I think that way we could avoid having to patch shaders
          * for actual SSBO indexes by using a static partitioning.
          *
          * Note same state block is used for images and buffers,
          * but images also need texture state for read access
          * (isam/isam.3d)
          */
         switch (shader) {
         case PIPE_SHADER_FRAGMENT:
         case PIPE_SHADER_COMPUTE:
            return 24;
         default:
            return 0;
         }
      }
      return 0;
   }
   mesa_loge("unknown shader param %d", param);
   return 0;
}

/* TODO depending on how much the limits differ for a3xx/a4xx, maybe move this
 * into per-generation backend?
 */
static int
fd_get_compute_param(struct pipe_screen *pscreen, enum pipe_shader_ir ir_type,
                     enum pipe_compute_cap param, void *ret)
{
   struct fd_screen *screen = fd_screen(pscreen);
   const char *const ir = "ir3";

   if (!has_compute(screen))
      return 0;

   struct ir3_compiler *compiler = screen->compiler;

#define RET(x)                                                                 \
   do {                                                                        \
      if (ret)                                                                 \
         memcpy(ret, x, sizeof(x));                                            \
      return sizeof(x);                                                        \
   } while (0)

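   /* Illustrative expansion (editorial note, not driver logic): for
    * PIPE_COMPUTE_CAP_GRID_DIMENSION below, RET((uint64_t[]){3}) copies a
    * single uint64_t into *ret when ret is non-NULL and returns
    * sizeof(uint64_t) == 8, so callers can pass ret == NULL first to learn
    * how large a buffer they need.
    */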
   switch (param) {
   case PIPE_COMPUTE_CAP_ADDRESS_BITS:
      if (screen->gen >= 5)
         RET((uint32_t[]){64});
      RET((uint32_t[]){32});

   case PIPE_COMPUTE_CAP_IR_TARGET:
      if (ret)
         sprintf(ret, "%s", ir);
      return strlen(ir) * sizeof(char);

   case PIPE_COMPUTE_CAP_GRID_DIMENSION:
      RET((uint64_t[]){3});

   case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
      RET(((uint64_t[]){65535, 65535, 65535}));

   case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
      RET(((uint64_t[]){1024, 1024, 64}));

   case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
      RET((uint64_t[]){1024});

   case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
      RET((uint64_t[]){screen->ram_size});

   case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
      RET((uint64_t[]){screen->info->cs_shared_mem_size});

   case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
   case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
      RET((uint64_t[]){4096});

   case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
      RET((uint64_t[]){screen->ram_size});

   case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
      RET((uint32_t[]){screen->max_freq / 1000000});

   case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
      RET((uint32_t[]){9999}); // TODO

   case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
      RET((uint32_t[]){1});

   case PIPE_COMPUTE_CAP_SUBGROUP_SIZES:
      RET((uint32_t[]){32}); // TODO

   case PIPE_COMPUTE_CAP_MAX_SUBGROUPS:
      RET((uint32_t[]){0}); // TODO

   case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
      RET((uint64_t[]){compiler->max_variable_workgroup_size});
   }

   return 0;
}

static void
fd_init_screen_caps(struct fd_screen *screen)
{
   struct pipe_caps *caps = (struct pipe_caps *)&screen->base.caps;

   u_init_pipe_screen_caps(&screen->base, 1);

   /* this is probably not totally correct.. but it's a start: */

   /* Supported features (boolean caps). */
   caps->npot_textures = true;
   caps->mixed_framebuffer_sizes = true;
   caps->anisotropic_filter = true;
   caps->blend_equation_separate = true;
   caps->texture_swizzle = true;
   caps->fs_coord_origin_upper_left = true;
   caps->seamless_cube_map = true;
   caps->vertex_color_unclamped = true;
   caps->quads_follow_provoking_vertex_convention = true;
   caps->string_marker = true;
   caps->mixed_color_depth_bits = true;
   caps->texture_barrier = true;
   caps->invalidate_buffer = true;
   caps->glsl_tess_levels_as_inputs = true;
   caps->texture_mirror_clamp_to_edge = true;
   caps->gl_spirv = true;
   caps->fbfetch_coherent = true;
   caps->has_const_bw = true;

   caps->copy_between_compressed_and_plain_formats =
   caps->multi_draw_indirect =
   caps->draw_parameters =
   caps->multi_draw_indirect_params =
   caps->depth_bounds_test = is_a6xx(screen);

   caps->vertex_input_alignment =
      is_a2xx(screen) ? PIPE_VERTEX_INPUT_ALIGNMENT_4BYTE
                      : PIPE_VERTEX_INPUT_ALIGNMENT_NONE;

   caps->fs_coord_pixel_center_integer = is_a2xx(screen);
   caps->fs_coord_pixel_center_half_integer = !is_a2xx(screen);

   caps->packed_uniforms = !is_a2xx(screen);

   caps->robust_buffer_access_behavior =
   caps->device_reset_status_query = screen->has_robustness;

   caps->compute = has_compute(screen);

   caps->texture_transfer_modes =
      screen->gen >= 6 ? PIPE_TEXTURE_TRANSFER_BLIT : 0;

   caps->pci_group =
   caps->pci_bus =
   caps->pci_device =
   caps->pci_function = 0;

   caps->supported_prim_modes =
   caps->supported_prim_modes_with_restart = screen->primtypes_mask;

   caps->fragment_shader_texture_lod =
   caps->fragment_shader_derivatives =
   caps->primitive_restart =
   caps->primitive_restart_fixed_index =
   caps->vs_instanceid =
   caps->vertex_element_instance_divisor =
   caps->indep_blend_enable =
   caps->indep_blend_func =
   caps->texture_buffer_objects =
   caps->texture_half_float_linear =
   caps->conditional_render =
   caps->conditional_render_inverted =
   caps->seamless_cube_map_per_texture =
   caps->clip_halfz =
      is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);

   caps->texture_multisample =
   caps->image_store_formatted =
   caps->image_load_formatted = is_a5xx(screen) || is_a6xx(screen);

   caps->fake_sw_msaa = !caps->texture_multisample;

   caps->surface_sample_count = is_a6xx(screen);

   caps->depth_clip_disable =
      is_a3xx(screen) || is_a4xx(screen) || is_a6xx(screen);

   caps->post_depth_coverage =
   caps->depth_clip_disable_separate =
   caps->demote_to_helper_invocation = is_a6xx(screen);

   caps->sampler_reduction_minmax =
   caps->sampler_reduction_minmax_arb =
      is_a6xx(screen) && screen->info->a6xx.has_sampler_minmax;

   caps->programmable_sample_locations =
      is_a6xx(screen) && screen->info->a6xx.has_sample_locations;

   caps->polygon_offset_clamp =
      is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);

   caps->prefer_imm_arrays_as_constbuf = false;

   caps->texture_buffer_offset_alignment = is_a3xx(screen) ? 16 :
      (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen) ? 64 : 0);

   caps->max_texel_buffer_elements =
      /* We could possibly emulate more by pretending 2d/rect textures and
       * splitting high bits of index into 2nd dimension..
       */
      is_a3xx(screen) ? A3XX_MAX_TEXEL_BUFFER_ELEMENTS_UINT :
      /* Note that the Vulkan blob on a540 and 640 report a
       * maxTexelBufferElements of just 65536 (the GLES3.2 and Vulkan
       * minimum).
       */
      (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen) ?
       A4XX_MAX_TEXEL_BUFFER_ELEMENTS_UINT : 0);

   caps->texture_border_color_quirk =
      PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_FREEDRENO;

   caps->texture_float_linear =
   caps->cube_map_array =
   caps->sampler_view_target =
   caps->texture_query_lod =
      is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);

   /* Note that a5xx can do this, it just can't (at least with
    * current firmware) do draw_indirect with base_instance.
    * Since draw_indirect is needed sooner (gles31 and gl40 vs
    * gl42), hide base_instance on a5xx.  :-/
    */
   caps->start_instance = is_a4xx(screen) || is_a6xx(screen);

   caps->constant_buffer_offset_alignment = 64;

   caps->int64 =
   caps->doubles = is_ir3(screen);

   caps->glsl_feature_level =
   caps->glsl_feature_level_compatibility =
      is_a6xx(screen) ? 460 : (is_ir3(screen) ? 140 : 120);

   caps->essl_feature_level =
      is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen) ? 320 :
      (is_ir3(screen) ? 300 : 120);

   caps->shader_buffer_offset_alignment =
      is_a6xx(screen) ? 64 : (is_a5xx(screen) || is_a4xx(screen) ? 4 : 0);

   caps->max_texture_gather_components =
      is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen) ? 4 : 0;

   /* TODO if we need this, do it in nir/ir3 backend to avoid breaking
    * precompile: */
   caps->force_persample_interp = false;

   caps->fbfetch =
      fd_device_version(screen->dev) >= FD_VERSION_GMEM_BASE &&
      is_a6xx(screen) ? screen->max_rts : 0;
   caps->sample_shading = is_a6xx(screen);

   caps->context_priority_mask = screen->priority_mask;

   caps->draw_indirect = is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);

   caps->framebuffer_no_attachment =
      is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);

   /* name is confusing, but this turns on std430 packing */
   caps->load_constbuf = is_ir3(screen);

   caps->nir_images_as_deref = false;

   caps->vs_layer_viewport =
   caps->tes_layer_viewport = is_a6xx(screen);

   caps->max_viewports = is_a6xx(screen) ? 16 : 1;

   caps->max_varyings = is_a6xx(screen) ? 31 : 16;

   /* We don't really have a limit on this, it all goes into the main
    * memory buffer.  Needs to be at least 120 / 4 (minimum requirement
    * for GL_MAX_TESS_PATCH_COMPONENTS).
    */
   caps->max_shader_patch_varyings = 128;

   caps->max_texture_upload_memory_budget = 64 * 1024 * 1024;

   caps->shareable_shaders = is_ir3(screen);

   /* Geometry shaders.. */
   caps->max_geometry_output_vertices = 256;
   caps->max_geometry_total_output_components = 2048;
   caps->max_gs_invocations = 32;

   /* Only a2xx has the half-border clamp mode in HW, just have mesa/st lower
    * it for later HW.
    */
   caps->gl_clamp = is_a2xx(screen);

   caps->clip_planes =
      /* Gens that support GS have GS lowered into a quasi-VS, which
       * confuses the frontend clip-plane lowering, so we handle this
       * in the backend:
       */
      screen->base.get_shader_param(&screen->base, PIPE_SHADER_GEOMETRY,
                                    PIPE_SHADER_CAP_MAX_INSTRUCTIONS) ? 1 :
      /* On a3xx, there is HW support for GL user clip planes that
       * occasionally has to fall back to shader key-based lowering to clip
       * distances in the VS, and we don't support clip distances, so that
       * is always shader-based lowering in the FS.
       *
       * On a4xx, there is no HW support for clip planes, so they are
       * always lowered to clip distances.  We also lack SW support for
       * the HW's clip distances, so we do shader-based lowering in the FS
       * in the driver backend.
       *
       * On a5xx-a6xx, we have the HW clip distances hooked up, so we just
       * let mesa/st lower desktop GL's clip planes to clip distances in
       * the last vertex shader stage.
       *
       * NOTE: but see comment above about geometry shaders
       */
      (is_a5xx(screen) ? 0 : 1);

   /* Stream output. */
   caps->max_vertex_streams = is_a6xx(screen) ? /* has SO + GS */
      PIPE_MAX_SO_BUFFERS : 0;
   caps->max_stream_output_buffers = is_ir3(screen) ? PIPE_MAX_SO_BUFFERS : 0;
   caps->stream_output_pause_resume =
   caps->stream_output_interleave_buffers =
   caps->fs_position_is_sysval =
   caps->tgsi_texcoord =
   caps->shader_array_components =
   caps->texture_query_samples =
   caps->fs_fine_derivative = is_ir3(screen);
   caps->shader_group_vote = is_a6xx(screen);
   caps->fs_face_is_integer_sysval = true;
   caps->fs_point_is_sysval = is_a2xx(screen);
   caps->max_stream_output_separate_components =
   caps->max_stream_output_interleaved_components = is_ir3(screen) ?
      16 * 4 /* should only be shader out limit? */ : 0;

   /* Texturing. */
   caps->max_texture_2d_size =
      is_a6xx(screen) || is_a5xx(screen) || is_a4xx(screen) ? 16384 : 8192;
   caps->max_texture_cube_levels =
      is_a6xx(screen) || is_a5xx(screen) || is_a4xx(screen) ? 15 : 14;

   caps->max_texture_3d_levels = is_a3xx(screen) ? 11 : 12;

   caps->max_texture_array_layers = is_a6xx(screen) ? 2048 :
      (is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) ? 256 : 0);

   /* Render targets. */
   caps->max_render_targets = screen->max_rts;
   caps->max_dual_source_render_targets =
      (is_a3xx(screen) || is_a6xx(screen)) ? 1 : 0;

   /* Queries. */
   caps->occlusion_query =
      is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);
   caps->query_timestamp =
   caps->query_time_elapsed =
      /* only a4xx and later, and requires a new enough kernel so we know
       * max_freq:
       */
      (screen->max_freq > 0) &&
      (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen));
   caps->timer_resolution = ticks_to_ns(1);
   caps->query_buffer_object =
   caps->query_so_overflow =
   caps->query_pipeline_statistics_single = is_a6xx(screen);

   caps->vendor_id = 0x5143;
   caps->device_id = 0xFFFFFFFF;

   caps->video_memory = get_memory_size(screen) >> 20;

   /* Enables GL_ATI_meminfo */
   caps->query_memory_info = get_memory_size(screen) != 0;

   caps->uma = true;
   caps->memobj = fd_device_version(screen->dev) >= FD_VERSION_MEMORY_FD;
   caps->native_fence_fd = fd_device_version(screen->dev) >= FD_VERSION_FENCE_FD;
   caps->fence_signal = screen->has_syncobj;
   caps->cull_distance = is_a6xx(screen);
   caps->shader_stencil_export = is_a6xx(screen);
   caps->two_sided_color = false;
   caps->throttle = screen->driconf.enable_throttling;

   caps->min_line_width =
   caps->min_line_width_aa =
   caps->min_point_size =
   caps->min_point_size_aa = 1;

   caps->point_size_granularity =
   caps->line_width_granularity = 0.1f;

   caps->max_line_width =
   caps->max_line_width_aa = 127.0f;

   caps->max_point_size =
   caps->max_point_size_aa = 4092.0f;

   caps->max_texture_anisotropy = 16.0f;
   caps->max_texture_lod_bias = 15.0f;
}

static const void *
fd_get_compiler_options(struct pipe_screen *pscreen, enum pipe_shader_ir ir,
                        enum pipe_shader_type shader)
{
   struct fd_screen *screen = fd_screen(pscreen);

   if (is_ir3(screen))
      return ir3_get_compiler_options(screen->compiler);

   return ir2_get_compiler_options();
}

static struct disk_cache *
fd_get_disk_shader_cache(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);

   if (is_ir3(screen)) {
      struct ir3_compiler *compiler = screen->compiler;
      return compiler->disk_cache;
   }

   return NULL;
}

bool
fd_screen_bo_get_handle(struct pipe_screen *pscreen, struct fd_bo *bo,
                        struct renderonly_scanout *scanout, unsigned stride,
                        struct winsys_handle *whandle)
{
   struct fd_screen *screen = fd_screen(pscreen);

   whandle->stride = stride;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      return fd_bo_get_name(bo, &whandle->handle) == 0;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      if (screen->ro) {
         return renderonly_get_handle(scanout, whandle);
      } else {
         uint32_t handle = fd_bo_handle(bo);
         if (!handle)
            return false;
         whandle->handle = handle;
         return true;
      }
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int fd = fd_bo_dmabuf(bo);
      if (fd < 0)
         return false;
      whandle->handle = fd;
      return true;
   } else {
      return false;
   }
}

static bool
is_format_supported(struct pipe_screen *pscreen, enum pipe_format format,
                    uint64_t modifier)
{
   struct fd_screen *screen = fd_screen(pscreen);
   if (screen->is_format_supported)
      return screen->is_format_supported(pscreen, format, modifier);
   return modifier == DRM_FORMAT_MOD_LINEAR;
}

static void
fd_screen_query_dmabuf_modifiers(struct pipe_screen *pscreen,
                                 enum pipe_format format, int max,
                                 uint64_t *modifiers,
                                 unsigned int *external_only, int *count)
{
   const uint64_t all_modifiers[] = {
      DRM_FORMAT_MOD_LINEAR,
      DRM_FORMAT_MOD_QCOM_COMPRESSED,
      DRM_FORMAT_MOD_QCOM_TILED3,
   };

   int num = 0;

   for (int i = 0; i < ARRAY_SIZE(all_modifiers); i++) {
      if (!is_format_supported(pscreen, format, all_modifiers[i]))
         continue;

      if (num < max) {
         if (modifiers)
            modifiers[num] = all_modifiers[i];

         if (external_only)
            external_only[num] = false;
      }

      num++;
   }

   *count = num;
}
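
/* Typical two-call usage from a frontend (a sketch for illustration; the
 * entry point is installed below as pscreen->query_dmabuf_modifiers):
 *
 *   int count;
 *   pscreen->query_dmabuf_modifiers(pscreen, fmt, 0, NULL, NULL, &count);
 *   uint64_t *mods = malloc(count * sizeof(uint64_t));
 *   pscreen->query_dmabuf_modifiers(pscreen, fmt, count, mods, NULL, &count);
 *
 * The first call just returns the total number of supported modifiers
 * (with max == 0 nothing is written), and the second call fills the array.
 */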

static bool
fd_screen_is_dmabuf_modifier_supported(struct pipe_screen *pscreen,
                                       uint64_t modifier,
                                       enum pipe_format format,
                                       bool *external_only)
{
   return is_format_supported(pscreen, format, modifier);
}

struct fd_bo *
fd_screen_bo_from_handle(struct pipe_screen *pscreen,
                         struct winsys_handle *whandle)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_bo *bo;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      bo = fd_bo_from_name(screen->dev, whandle->handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      bo = fd_bo_from_handle(screen->dev, whandle->handle, 0);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      bo = fd_bo_from_dmabuf(screen->dev, whandle->handle);
   } else {
      DBG("Attempt to import unsupported handle type %d", whandle->type);
      return NULL;
   }

   if (!bo) {
      DBG("ref name 0x%08x failed", whandle->handle);
      return NULL;
   }

   return bo;
}

static void
_fd_fence_ref(struct pipe_screen *pscreen, struct pipe_fence_handle **ptr,
              struct pipe_fence_handle *pfence)
{
   fd_pipe_fence_ref(ptr, pfence);
}

static void
fd_screen_get_device_uuid(struct pipe_screen *pscreen, char *uuid)
{
   struct fd_screen *screen = fd_screen(pscreen);

   fd_get_device_uuid(uuid, screen->dev_id);
}

static void
fd_screen_get_driver_uuid(struct pipe_screen *pscreen, char *uuid)
{
   fd_get_driver_uuid(uuid);
}

static int
fd_screen_get_fd(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);
   return fd_device_fd(screen->dev);
}

struct pipe_screen *
fd_screen_create(int fd, const struct pipe_screen_config *config,
                 struct renderonly *ro)
{
   struct fd_device *dev = fd_device_new_dup(fd);
   if (!dev)
      return NULL;

   struct fd_screen *screen = CALLOC_STRUCT(fd_screen);
   struct pipe_screen *pscreen;
   uint64_t val;

   fd_mesa_debug = debug_get_option_fd_mesa_debug();

   if (FD_DBG(NOBIN))
      fd_binning_enabled = false;

   if (!screen)
      return NULL;

#ifdef HAVE_PERFETTO
   fd_perfetto_init();
#endif

   util_gpuvis_init();

   pscreen = &screen->base;

   screen->dev = dev;
   screen->ro = ro;

   // maybe this should be in context?
   screen->pipe = fd_pipe_new(screen->dev, FD_PIPE_3D);
   if (!screen->pipe) {
      DBG("could not create 3d pipe");
      goto fail;
   }

   if (fd_pipe_get_param(screen->pipe, FD_GMEM_SIZE, &val)) {
      DBG("could not get GMEM size");
      goto fail;
   }
   screen->gmemsize_bytes = debug_get_num_option("FD_MESA_GMEM", val);

   if (fd_device_version(dev) >= FD_VERSION_GMEM_BASE) {
      fd_pipe_get_param(screen->pipe, FD_GMEM_BASE, &screen->gmem_base);
   }

   if (fd_pipe_get_param(screen->pipe, FD_MAX_FREQ, &val)) {
      DBG("could not get gpu freq");
      /* this limits what performance related queries are
       * supported but is not fatal
       */
      screen->max_freq = 0;
   } else {
      screen->max_freq = val;
   }

   if (fd_pipe_get_param(screen->pipe, FD_TIMESTAMP, &val) == 0)
      screen->has_timestamp = true;

   screen->dev_id = fd_pipe_dev_id(screen->pipe);

   if (fd_pipe_get_param(screen->pipe, FD_GPU_ID, &val)) {
      DBG("could not get gpu-id");
      goto fail;
   }
   screen->gpu_id = val;

   if (fd_pipe_get_param(screen->pipe, FD_CHIP_ID, &val)) {
      DBG("could not get chip-id");
      /* older kernels may not have this property: */
      unsigned core = screen->gpu_id / 100;
      unsigned major = (screen->gpu_id % 100) / 10;
      unsigned minor = screen->gpu_id % 10;
      unsigned patch = 0; /* assume the worst */
      val = (patch & 0xff) | ((minor & 0xff) << 8) | ((major & 0xff) << 16) |
            ((core & 0xff) << 24);
   }
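   /* e.g. for gpu_id = 630 (an a630) the fallback synthesizes core = 6,
    * major = 3, minor = 0, patch = 0, i.e. chip_id = 0x06030000.
    */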
   screen->chip_id = val;
   screen->gen = fd_dev_gen(screen->dev_id);

   if (fd_pipe_get_param(screen->pipe, FD_NR_PRIORITIES, &val)) {
      DBG("could not get # of rings");
      screen->priority_mask = 0;
   } else {
      /* # of rings equates to number of unique priority values: */
      screen->priority_mask = (1 << val) - 1;

      /* Lowest numerical value (ie. zero) is highest priority: */
      screen->prio_high = 0;

      /* Highest numerical value is lowest priority: */
      screen->prio_low = val - 1;

      /* Pick midpoint for normal priority..  note that whatever the
       * range of possible priorities, since we divide by 2 the
       * result will either be an integer or an integer plus 0.5,
       * in which case it will round down to an integer, so int
       * division will give us an appropriate result in either
       * case:
       */
      screen->prio_norm = val / 2;
   }
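   /* Worked example: with val = 3 rings, priority_mask = 0x7, prio_high = 0,
    * prio_norm = 1 and prio_low = 2; with val = 2, prio_norm and prio_low
    * both end up as 1.
    */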

   if (fd_device_version(dev) >= FD_VERSION_ROBUSTNESS)
      screen->has_robustness = true;

   screen->has_syncobj = fd_has_syncobj(screen->dev);

   /* parse driconf configuration now for device specific overrides: */
   driParseConfigFiles(config->options, config->options_info, 0, "msm",
                       NULL, fd_dev_name(screen->dev_id), NULL, 0, NULL, 0);

   screen->driconf.conservative_lrz =
      !driQueryOptionb(config->options, "disable_conservative_lrz");
   screen->driconf.enable_throttling =
      !driQueryOptionb(config->options, "disable_throttling");
   screen->driconf.dual_color_blend_by_location =
      driQueryOptionb(config->options, "dual_color_blend_by_location");

   struct sysinfo si;
   sysinfo(&si);
   screen->ram_size = si.totalram;

   DBG("Pipe Info:");
   DBG(" GPU-id:    %s", fd_dev_name(screen->dev_id));
   DBG(" Chip-id:   0x%016"PRIx64, screen->chip_id);
   DBG(" GMEM size: 0x%08x", screen->gmemsize_bytes);

   const struct fd_dev_info info = fd_dev_info(screen->dev_id);
   if (!info.chip) {
      mesa_loge("unsupported GPU: a%03d", screen->gpu_id);
      goto fail;
   }

   screen->dev_info = info;
   screen->info = &screen->dev_info;

   /* explicitly checking for GPU revisions that are known to work.  This
    * may be overly conservative for a3xx, where spoofing the gpu_id with
    * the blob driver seems to generate identical cmdstream dumps.  But
    * on a2xx, there seem to be small differences between the GPU revs
    * so it is probably better to actually test first on real hardware
    * before enabling:
    *
    * If you have a different adreno version, feel free to add it to one
    * of the cases below and see what happens.  And if it works, please
    * send a patch ;-)
    */
   switch (screen->gen) {
   case 2:
      fd2_screen_init(pscreen);
      break;
   case 3:
      fd3_screen_init(pscreen);
      break;
   case 4:
      fd4_screen_init(pscreen);
      break;
   case 5:
      fd5_screen_init(pscreen);
      break;
   case 6:
   case 7:
      fd6_screen_init(pscreen);
      break;
   default:
      mesa_loge("unsupported GPU generation: a%uxx", screen->gen);
      goto fail;
   }

   /* fdN_screen_init() should set this: */
   assert(screen->primtypes);
   screen->primtypes_mask = 0;
   for (unsigned i = 0; i <= MESA_PRIM_COUNT; i++)
      if (screen->primtypes[i])
         screen->primtypes_mask |= (1 << i);
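   /* e.g. if only MESA_PRIM_POINTS (0) and MESA_PRIM_TRIANGLES (4) were
    * supported, primtypes_mask would be (1 << 0) | (1 << 4) == 0x11.
    */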

   if (FD_DBG(PERFC)) {
      screen->perfcntr_groups =
         fd_perfcntrs(screen->dev_id, &screen->num_perfcntr_groups);
   }

   /* NOTE: don't enable if we have too old of a kernel to support
    * growable cmdstream buffers, since memory requirement for cmdstream
    * buffers would be too much otherwise.
    */
   if (fd_device_version(dev) >= FD_VERSION_UNLIMITED_CMDS)
      screen->reorder = !FD_DBG(INORDER);

   fd_bc_init(&screen->batch_cache);

   list_inithead(&screen->context_list);

   util_idalloc_mt_init_tc(&screen->buffer_ids);

   (void)simple_mtx_init(&screen->lock, mtx_plain);

   pscreen->destroy = fd_screen_destroy;
   pscreen->get_screen_fd = fd_screen_get_fd;
   pscreen->query_memory_info = fd_query_memory_info;
   pscreen->get_shader_param = fd_screen_get_shader_param;
   pscreen->get_compute_param = fd_get_compute_param;
   pscreen->get_compiler_options = fd_get_compiler_options;
   pscreen->get_disk_shader_cache = fd_get_disk_shader_cache;

   fd_resource_screen_init(pscreen);
   fd_query_screen_init(pscreen);
   fd_gmem_screen_init(pscreen);

   pscreen->get_name = fd_screen_get_name;
   pscreen->get_vendor = fd_screen_get_vendor;
   pscreen->get_device_vendor = fd_screen_get_device_vendor;

   pscreen->get_sample_pixel_grid = fd_get_sample_pixel_grid;

   pscreen->get_timestamp = fd_screen_get_timestamp;

   pscreen->fence_reference = _fd_fence_ref;
   pscreen->fence_finish = fd_pipe_fence_finish;
   pscreen->fence_get_fd = fd_pipe_fence_get_fd;

   pscreen->query_dmabuf_modifiers = fd_screen_query_dmabuf_modifiers;
   pscreen->is_dmabuf_modifier_supported =
      fd_screen_is_dmabuf_modifier_supported;

   pscreen->get_device_uuid = fd_screen_get_device_uuid;
   pscreen->get_driver_uuid = fd_screen_get_driver_uuid;

   fd_init_screen_caps(screen);

   slab_create_parent(&screen->transfer_pool, sizeof(struct fd_transfer), 16);

   simple_mtx_init(&screen->aux_ctx_lock, mtx_plain);

   return pscreen;

fail:
   fd_screen_destroy(pscreen);
   return NULL;
}

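/* Note the locking protocol of the lazily-created aux context:
 * fd_screen_aux_context_get() acquires aux_ctx_lock and returns with it
 * held, and the matching fd_screen_aux_context_put() flushes the context
 * and releases the lock, so the two calls must always be paired.
 */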
struct fd_context *
fd_screen_aux_context_get(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);

   simple_mtx_lock(&screen->aux_ctx_lock);

   if (!screen->aux_ctx) {
      screen->aux_ctx = pscreen->context_create(pscreen, NULL, 0);
   }

   return fd_context(screen->aux_ctx);
}

void
fd_screen_aux_context_put(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);

   screen->aux_ctx->flush(screen->aux_ctx, NULL, 0);
   simple_mtx_unlock(&screen->aux_ctx_lock);
}