/*
 * Copyright © 2017 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

/* Device capability database for AMD GPUs.
 *
 * struct radeon_info describes one device: identification, clocks and cache
 * sizes, hardware bug/workaround flags, memory layout, firmware versions, and
 * kernel/winsys capabilities. It is filled by ac_query_gpu_info() and consumed
 * by the rest of the common AMD code.
 *
 * NOTE: field order and types in these structs define the in-memory layout
 * shared across the driver; do not reorder fields casually.
 */

#ifndef AC_GPU_INFO_H
#define AC_GPU_INFO_H

#include <stdbool.h>
#include "util/macros.h"
#include "amd_family.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Upper bounds used to size per-SE/per-SA arrays (e.g. cu_mask below). */
#define AMD_MAX_SE 32
#define AMD_MAX_SA_PER_SE 2
#define AMD_MAX_WGP 60

struct amdgpu_gpu_info;

/* Version and queue information for one hardware IP block.
 * One instance per IP type lives in radeon_info::ip[].
 */
struct amd_ip_info {
   uint8_t ver_major;
   uint8_t ver_minor;
   uint8_t ver_rev;
   uint8_t num_queues;
   uint32_t ib_alignment;
   uint32_t ib_pad_dw_mask;
};

struct radeon_info {
   /* Device info. */
   const char *name;
   char lowercase_name[32];
   const char *marketing_name;
   char dev_filename[32];
   uint32_t num_se;           /* only enabled SEs */
   uint32_t num_rb;           /* only enabled RBs */
   uint32_t num_cu;           /* only enabled CUs */
   uint32_t max_gpu_freq_mhz; /* also known as the shader clock */
   uint32_t max_gflops;
   uint32_t sqc_inst_cache_size;
   uint32_t sqc_scalar_cache_size;
   uint32_t num_sqc_per_wgp;
   uint32_t tcp_cache_size;
   uint32_t l1_cache_size;
   uint32_t l2_cache_size;
   uint32_t l3_cache_size_mb;
   uint32_t num_tcc_blocks; /* also the number of memory channels */
   uint32_t memory_freq_mhz;
   uint32_t memory_freq_mhz_effective;
   uint32_t memory_bus_width;
   uint32_t memory_bandwidth_gbps;
   uint32_t pcie_gen;
   uint32_t pcie_num_lanes;
   uint32_t pcie_bandwidth_mbps;
   uint32_t clock_crystal_freq;
   struct amd_ip_info ip[AMD_NUM_IP_TYPES];

   /* Identification. */
   /* PCI info: domain:bus:dev:func */
   struct {
      uint32_t domain;
      uint32_t bus;
      uint32_t dev;
      uint32_t func;
      bool valid;
   } pci;

   uint32_t pci_id;
   uint32_t pci_rev_id;
   enum radeon_family family;
   enum amd_gfx_level gfx_level;
   uint32_t family_id;
   uint32_t chip_external_rev;
   uint32_t chip_rev; /* 0 = A0, 1 = A1, etc. */

   /* Flags. */
   bool family_overridden; /* AMD_FORCE_FAMILY was used, skip command submission */
   bool is_pro_graphics;
   bool has_graphics; /* false if the chip is compute-only */
   bool has_clear_state;
   bool has_distributed_tess;
   bool has_dcc_constant_encode;
   bool has_rbplus;     /* if RB+ registers exist */
   bool rbplus_allowed; /* if RB+ is allowed */
   bool has_load_ctx_reg_pkt;
   bool has_out_of_order_rast;
   bool has_packed_math_16bit;
   bool has_accelerated_dot_product;
   bool cpdma_prefetch_writes_memory;
   /* has_*_bug flags below select per-chip workarounds in the drivers. */
   bool has_gfx9_scissor_bug;
   bool has_tc_compat_zrange_bug;
   bool has_small_prim_filter_sample_loc_bug;
   bool has_ls_vgpr_init_bug;
   bool has_pops_missed_overlap_bug;
   bool has_zero_index_buffer_bug;
   bool has_image_load_dcc_bug;
   bool has_two_planes_iterate256_bug;
   bool has_vgt_flush_ngg_legacy_bug;
   bool has_cs_regalloc_hang_bug;
   bool has_async_compute_threadgroup_bug;
   bool has_async_compute_align32_bug;
   bool has_32bit_predication;
   bool has_3d_cube_border_color_mipmap;
   bool has_image_opcodes;
   bool never_stop_sq_perf_counters;
   bool has_sqtt_rb_harvest_bug;
   bool has_sqtt_auto_flush_mode_bug;
   bool never_send_perfcounter_stop;
   bool discardable_allows_big_page;
   bool has_export_conflict_bug;
   bool has_vrs_ds_export_bug;
   bool has_taskmesh_indirect0_bug;
   bool sdma_supports_sparse;      /* Whether SDMA can safely access sparse resources. */
   bool sdma_supports_compression; /* Whether SDMA supports DCC and HTILE. */
   bool has_set_context_pairs_packed;
   bool has_set_sh_pairs_packed;

   /* conformant_trunc_coord is equal to TA_CNTL2.TRUNCATE_COORD_MODE, which exists since gfx11.
    *
    * If TA_CNTL2.TRUNCATE_COORD_MODE == 0, coordinate truncation is the same as gfx10 and older.
    *
    * If TA_CNTL2.TRUNCATE_COORD_MODE == 1, coordinate truncation is adjusted to be D3D9/GL/Vulkan
    * conformant if you also set TRUNC_COORD. Coordinate truncation uses D3D10+ behaviour if
    * TRUNC_COORD is unset.
    *
    * Behavior if TA_CNTL2.TRUNCATE_COORD_MODE == 1:
    *    truncate_coord_xy = TRUNC_COORD && (xy_filter == Point && !gather);
    *    truncate_coord_z = TRUNC_COORD && (z_filter == Point);
    *    truncate_coord_layer = false;
    *
    * Behavior if TA_CNTL2.TRUNCATE_COORD_MODE == 0:
    *    truncate_coord_xy = TRUNC_COORD;
    *    truncate_coord_z = TRUNC_COORD;
    *    truncate_coord_layer = TRUNC_COORD;
    *
    * AnisoPoint is treated as Point.
    */
   bool conformant_trunc_coord;

   /* Display features. */
   /* There are 2 display DCC codepaths, because display expects unaligned DCC. */
   /* Disable RB and pipe alignment to skip the retile blit. (1 RB chips only) */
   bool use_display_dcc_unaligned;
   /* Allocate both aligned and unaligned DCC and use the retile blit. */
   bool use_display_dcc_with_retile_blit;

   /* Memory info. */
   uint32_t pte_fragment_size;
   uint32_t gart_page_size;
   uint32_t gart_size_kb;
   uint32_t vram_size_kb;
   uint64_t vram_vis_size_kb;
   uint32_t vram_type;
   uint32_t max_heap_size_kb;
   uint32_t min_alloc_size;
   uint32_t address32_hi;
   bool has_dedicated_vram;
   bool all_vram_visible;
   bool has_l2_uncached;
   bool r600_has_virtual_memory;
   uint32_t max_tcc_blocks;
   uint32_t tcc_cache_line_size;
   bool tcc_rb_non_coherent; /* whether L2 inv is needed for render->texture transitions */
   unsigned pc_lines;
   uint32_t lds_size_per_workgroup;
   uint32_t lds_alloc_granularity;
   uint32_t lds_encode_granularity;

   /* CP info. */
   bool gfx_ib_pad_with_type2;
   /* Command-processor microcode versions/feature bits (ME/MEC/PFP). */
   uint32_t me_fw_version;
   uint32_t me_fw_feature;
   uint32_t mec_fw_version;
   uint32_t mec_fw_feature;
   uint32_t pfp_fw_version;
   uint32_t pfp_fw_feature;

   /* Multimedia info. */
   uint32_t uvd_fw_version;
   uint32_t vce_fw_version;
   uint32_t vce_harvest_config;
   /* Video decode (dec_caps) and encode (enc_caps) capabilities per codec. */
   struct video_caps_info {
      struct video_codec_cap {
         uint32_t valid;
         uint32_t max_width;
         uint32_t max_height;
         uint32_t max_pixels_per_frame;
         uint32_t max_level;
         uint32_t pad;
      } codec_info[8]; /* the number of available codecs */
   } dec_caps, enc_caps;

   enum vcn_version vcn_ip_version;
   enum sdma_version sdma_ip_version;

   /* Kernel & winsys capabilities. */
   uint32_t drm_major; /* version */
   uint32_t drm_minor;
   uint32_t drm_patchlevel;
   uint32_t max_submitted_ibs[AMD_NUM_IP_TYPES];
   bool is_amdgpu;
   bool has_userptr;
   bool has_timeline_syncobj;
   bool has_local_buffers;
   bool has_bo_metadata;
   bool has_eqaa_surface_allocator;
   bool has_sparse_vm_mappings;
   bool has_scheduled_fence_dependency;
   bool has_gang_submit;
   bool has_gpuvm_fault_query;
   bool has_pcie_bandwidth_info;
   bool has_stable_pstate;
   /* Whether SR-IOV is enabled or amdgpu.mcbp=1 was set on the kernel command line. */
   bool register_shadowing_required;
   bool has_tmz_support;
   bool kernel_has_modifiers;

   /* If the kernel driver uses CU reservation for high priority compute on gfx10+, it programs
    * a global CU mask in the hw that is AND'ed with CU_EN register fields set by userspace.
    * The packet that does the AND'ing is SET_SH_REG_INDEX(index = 3). If you don't use
    * SET_SH_REG_INDEX, the global CU mask will not be applied.
    *
    * If uses_kernel_cu_mask is true, use SET_SH_REG_INDEX.
    *
    * If uses_kernel_cu_mask is false, SET_SH_REG_INDEX shouldn't be used because it only
    * increases CP overhead and doesn't have any other effect.
    *
    * The alternative to this is to set the AMD_CU_MASK environment variable that has the same
    * effect on radeonsi and RADV and doesn't need SET_SH_REG_INDEX.
    */
   bool uses_kernel_cu_mask;

   /* Shader cores. */
   uint16_t cu_mask[AMD_MAX_SE][AMD_MAX_SA_PER_SE];
   uint32_t r600_max_quad_pipes; /* wave size / 16 */
   uint32_t max_good_cu_per_sa;
   uint32_t min_good_cu_per_sa; /* min != max if SAs have different # of CUs */
   uint32_t max_se;             /* number of shader engines incl. disabled ones */
   uint32_t max_sa_per_se;      /* shader arrays per shader engine */
   uint32_t num_cu_per_sh;
   uint32_t max_waves_per_simd;
   uint32_t num_physical_sgprs_per_simd;
   uint32_t num_physical_wave64_vgprs_per_simd;
   uint32_t num_simd_per_compute_unit;
   uint32_t min_sgpr_alloc;
   uint32_t max_sgpr_alloc;
   uint32_t sgpr_alloc_granularity;
   uint32_t min_wave64_vgpr_alloc;
   uint32_t max_vgpr_alloc;
   uint32_t wave64_vgpr_alloc_granularity;
   uint32_t max_scratch_waves;
   uint32_t attribute_ring_size_per_se;

   /* Render backends (color + depth blocks). */
   uint32_t r300_num_gb_pipes;
   uint32_t r300_num_z_pipes;
   uint32_t r600_gb_backend_map; /* R600 harvest config */
   bool r600_gb_backend_map_valid;
   uint32_t r600_num_banks;
   uint32_t mc_arb_ramcfg;
   uint32_t gb_addr_config;
   uint32_t pa_sc_tile_steering_override; /* CLEAR_STATE also sets this */
   uint32_t max_render_backends;          /* number of render backends incl. disabled ones */
   uint32_t num_tile_pipes;               /* pipe count from PIPE_CONFIG */
   uint32_t pipe_interleave_bytes;
   uint64_t enabled_rb_mask; /* bitmask of enabled physical RBs, up to max_render_backends bits */
   uint64_t max_alignment;   /* from addrlib */
   uint32_t pbb_max_alloc_count;

   /* Tile modes. */
   uint32_t si_tile_mode_array[32];
   uint32_t cik_macrotile_mode_array[16];

   /* AMD_CU_MASK environment variable or ~0. */
   bool spi_cu_en_has_effect;
   uint32_t spi_cu_en;

   /* Firmware-based register shadowing parameters (sizes/alignments of the
    * shadow and CSA buffers); valid when has_fw_based_shadowing is set.
    */
   struct {
      uint32_t shadow_size;
      uint32_t shadow_alignment;
      uint32_t csa_size;
      uint32_t csa_alignment;
   } fw_based_mcbp;
   bool has_fw_based_shadowing;
};

/* Fill *info by querying the device. fd is the DRM file descriptor; dev_p is
 * the winsys device handle. Returns success.
 * NOTE(review): exact failure semantics are defined by the implementation —
 * verify in ac_gpu_info.c.
 */
bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
                       bool require_pci_bus_info);

/* Write a driver/device UUID into the caller-provided buffer of 'size' bytes. */
void ac_compute_driver_uuid(char *uuid, size_t size);

void ac_compute_device_uuid(const struct radeon_info *info, char *uuid, size_t size);
/* Dump the contents of *info to f (for debugging). */
void ac_print_gpu_info(const struct radeon_info *info, FILE *f);
int ac_get_gs_table_depth(enum amd_gfx_level gfx_level, enum radeon_family family);
void ac_get_raster_config(const struct radeon_info *info, uint32_t *raster_config_p,
                          uint32_t *raster_config_1_p, uint32_t *se_tile_repeat_p);
void ac_get_harvested_configs(const struct radeon_info *info, unsigned raster_config,
                              unsigned *cik_raster_config_1_p, unsigned *raster_config_se);
/* Compute the COMPUTE_RESOURCE_LIMITS register value for the given launch parameters. */
unsigned ac_get_compute_resource_limits(const struct radeon_info *info,
                                        unsigned waves_per_threadgroup, unsigned max_waves_per_sh,
                                        unsigned threadgroups_per_cu);

/* Tessellation (hull-shader) off-chip buffer/ring parameters derived from *info. */
struct ac_hs_info {
   uint32_t tess_offchip_block_dw_size;
   uint32_t max_offchip_buffers;
   uint32_t hs_offchip_param;
   uint32_t tess_factor_ring_size;
   uint32_t tess_offchip_ring_offset;
   uint32_t tess_offchip_ring_size;
};

void ac_get_hs_info(const struct radeon_info *info,
                    struct ac_hs_info *hs);

/* Task rings BO layout information.
 * This BO is shared between GFX and ACE queues so that the ACE and GFX
 * firmware can cooperate on task->mesh dispatches and is also used to
 * store the task payload which is passed to mesh shaders.
 *
 * The driver only needs to create this BO once,
 * and it will always be able to accommodate the maximum needed
 * task payload size.
 *
 * The following memory layout is used:
 * 1. Control buffer: 9 DWORDs, 256 byte aligned
 *    Used by the firmware to maintain the current state.
 *    (padding)
 * 2. Draw ring: 4 DWORDs per entry, 256 byte aligned
 *    Task shaders store the mesh dispatch size here.
 *    (padding)
 * 3. Payload ring: 16K bytes per entry, 256 byte aligned.
 *    This is where task payload is stored by task shaders and
 *    read by mesh shaders.
 *
 */
struct ac_task_info {
   uint32_t draw_ring_offset;    /* byte offset of the draw ring within the BO */
   uint32_t payload_ring_offset; /* byte offset of the payload ring within the BO */
   uint32_t bo_size_bytes;       /* total BO size */
   uint16_t num_entries;         /* number of draw/payload ring entries */
};

/* Size of each payload entry in the task payload ring.
 * Spec requires minimum 16K bytes.
 */
#define AC_TASK_PAYLOAD_ENTRY_BYTES 16384

/* Size of each draw entry in the task draw ring.
 * 4 DWORDs per entry.
 */
#define AC_TASK_DRAW_ENTRY_BYTES 16

/* Size of the task control buffer. 9 DWORDs. */
#define AC_TASK_CTRLBUF_BYTES 36

void ac_get_task_info(const struct radeon_info *info,
                      struct ac_task_info *task_info);

/* Number of memory operations per clock for the given VRAM type
 * (used with memory_freq_mhz to derive effective bandwidth).
 */
uint32_t ac_memory_ops_per_clock(uint32_t vram_type);

#ifdef __cplusplus
}
#endif

#endif /* AC_GPU_INFO_H */