1 /*
2  * Copyright © 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
13  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
14  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
15  * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
16  * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  */
25 
26 #include "ac_gpu_info.h"
27 
28 #include "addrlib/src/amdgpu_asic_addr.h"
29 #include "sid.h"
30 #include "util/macros.h"
31 #include "util/u_cpu_detect.h"
32 #include "util/u_math.h"
33 
34 #include <stdio.h>
35 
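/* On Windows there is no libdrm/amdgpu userspace library available, so the
 * block below provides minimal stand-in type/flag definitions and stub query
 * functions that simply report failure, which is enough for this file to
 * compile; all other platforms include the real headers in the #else branch.
 */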
36 #ifdef _WIN32
37 #define DRM_CAP_ADDFB2_MODIFIERS 0x10
38 #define DRM_CAP_SYNCOBJ 0x13
39 #define DRM_CAP_SYNCOBJ_TIMELINE 0x14
40 #define AMDGPU_GEM_DOMAIN_GTT 0x2
41 #define AMDGPU_GEM_DOMAIN_VRAM 0x4
42 #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
43 #define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
44 #define AMDGPU_HW_IP_GFX 0
45 #define AMDGPU_HW_IP_COMPUTE 1
46 #define AMDGPU_HW_IP_DMA 2
47 #define AMDGPU_HW_IP_UVD 3
48 #define AMDGPU_HW_IP_VCE 4
49 #define AMDGPU_HW_IP_UVD_ENC 5
50 #define AMDGPU_HW_IP_VCN_DEC 6
51 #define AMDGPU_HW_IP_VCN_ENC 7
52 #define AMDGPU_HW_IP_VCN_JPEG 8
53 #define AMDGPU_IDS_FLAGS_FUSION 0x1
54 #define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
55 #define AMDGPU_IDS_FLAGS_TMZ 0x4
56 #define AMDGPU_INFO_FW_VCE 0x1
57 #define AMDGPU_INFO_FW_UVD 0x2
58 #define AMDGPU_INFO_FW_GFX_ME 0x04
59 #define AMDGPU_INFO_FW_GFX_PFP 0x05
60 #define AMDGPU_INFO_FW_GFX_CE 0x06
61 #define AMDGPU_INFO_DEV_INFO 0x16
62 #define AMDGPU_INFO_MEMORY 0x19
63 #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
64 #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
65 struct drm_amdgpu_heap_info {
66    uint64_t total_heap_size;
67 };
68 struct drm_amdgpu_memory_info {
69    struct drm_amdgpu_heap_info vram;
70    struct drm_amdgpu_heap_info cpu_accessible_vram;
71    struct drm_amdgpu_heap_info gtt;
72 };
73 struct drm_amdgpu_info_device {
74    uint32_t num_tcc_blocks;
75    uint32_t pa_sc_tile_steering_override;
76    uint64_t tcc_disabled_mask;
77 };
78 struct drm_amdgpu_info_hw_ip {
79    uint32_t ib_start_alignment;
80    uint32_t ib_size_alignment;
81    uint32_t available_rings;
82 };
83 typedef struct _drmPciBusInfo {
84    uint16_t domain;
85    uint8_t bus;
86    uint8_t dev;
87    uint8_t func;
88 } drmPciBusInfo, *drmPciBusInfoPtr;
89 typedef struct _drmDevice {
90    union {
91       drmPciBusInfoPtr pci;
92    } businfo;
93 } drmDevice, *drmDevicePtr;
94 enum amdgpu_sw_info {
95    amdgpu_sw_info_address32_hi = 0,
96 };
97 typedef struct amdgpu_device *amdgpu_device_handle;
98 typedef struct amdgpu_bo *amdgpu_bo_handle;
99 struct amdgpu_bo_alloc_request {
100    uint64_t alloc_size;
101    uint64_t phys_alignment;
102    uint32_t preferred_heap;
103    uint64_t flags;
104 };
105 struct amdgpu_gds_resource_info {
106    uint32_t gds_gfx_partition_size;
107    uint32_t gds_total_size;
108 };
109 struct amdgpu_buffer_size_alignments {
110    uint64_t size_local;
111    uint64_t size_remote;
112 };
113 struct amdgpu_heap_info {
114    uint64_t heap_size;
115 };
116 struct amdgpu_gpu_info {
117    uint32_t asic_id;
118    uint32_t chip_external_rev;
119    uint32_t family_id;
120    uint64_t ids_flags;
121    uint64_t max_engine_clk;
122    uint64_t max_memory_clk;
123    uint32_t num_shader_engines;
124    uint32_t num_shader_arrays_per_engine;
125    uint32_t rb_pipes;
126    uint32_t enabled_rb_pipes_mask;
127    uint32_t gpu_counter_freq;
128    uint32_t mc_arb_ramcfg;
129    uint32_t gb_addr_cfg;
130    uint32_t gb_tile_mode[32];
131    uint32_t gb_macro_tile_mode[16];
132    uint32_t cu_bitmap[4][4];
133    uint32_t vram_type;
134    uint32_t vram_bit_width;
135    uint32_t ce_ram_size;
136    uint32_t vce_harvest_config;
137    uint32_t pci_rev_id;
138 };
139 static int drmGetCap(int fd, uint64_t capability, uint64_t *value)
140 {
141    return -EINVAL;
142 }
143 static void drmFreeDevice(drmDevicePtr *device)
144 {
145 }
146 static int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
147 {
148    return -ENODEV;
149 }
150 static int amdgpu_bo_alloc(amdgpu_device_handle dev,
151    struct amdgpu_bo_alloc_request *alloc_buffer,
152    amdgpu_bo_handle *buf_handle)
153 {
154    return -EINVAL;
155 }
156 static int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
157 {
158    return -EINVAL;
159 }
160 static int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
161    struct amdgpu_buffer_size_alignments
162    *info)
163 {
164    return -EINVAL;
165 }
166 static int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
167    unsigned ip_instance, unsigned index,
168    uint32_t *version, uint32_t *feature)
169 {
170    return -EINVAL;
171 }
172 static int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
173    unsigned ip_instance,
174    struct drm_amdgpu_info_hw_ip *info)
175 {
176    return -EINVAL;
177 }
178 static int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
179    uint32_t flags, struct amdgpu_heap_info *info)
180 {
181    return -EINVAL;
182 }
183 static int amdgpu_query_gpu_info(amdgpu_device_handle dev,
184    struct amdgpu_gpu_info *info)
185 {
186    return -EINVAL;
187 }
188 static int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
189    unsigned size, void *value)
190 {
191    return -EINVAL;
192 }
193 static int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
194    void *value)
195 {
196    return -EINVAL;
197 }
198 static int amdgpu_query_gds_info(amdgpu_device_handle dev,
199    struct amdgpu_gds_resource_info *gds_info)
200 {
201    return -EINVAL;
202 }
203 static int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
204                                  unsigned size, void *value)
205 {
206    return -EINVAL;
207 }
208 static const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
209 {
210    return NULL;
211 }
212 #else
213 #include "drm-uapi/amdgpu_drm.h"
214 #include <amdgpu.h>
215 #include <xf86drm.h>
216 #endif
217 
218 #define CIK_TILE_MODE_COLOR_2D 14
219 
220 #define CIK__GB_TILE_MODE__PIPE_CONFIG(x)           (((x) >> 6) & 0x1f)
221 #define CIK__PIPE_CONFIG__ADDR_SURF_P2              0
222 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16         4
223 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16        5
224 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32        6
225 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32        7
226 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16   8
227 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16   9
228 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16   10
229 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16  11
230 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16  12
231 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32  13
232 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32  14
233 #define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16  16
234 #define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
235 
236 static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
237 {
238    unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];
239 
240    switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
241    case CIK__PIPE_CONFIG__ADDR_SURF_P2:
242       return 2;
243    case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
244    case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
245    case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
246    case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
247       return 4;
248    case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
249    case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
250    case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
251    case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
252    case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
253    case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
254    case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
255       return 8;
256    case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
257    case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
258       return 16;
259    default:
260       fprintf(stderr, "Invalid GFX7 pipe configuration, assuming P2\n");
261       assert(!"this should never occur");
262       return 2;
263    }
264 }
265 
266 static bool has_syncobj(int fd)
267 {
268    uint64_t value;
269    if (drmGetCap(fd, DRM_CAP_SYNCOBJ, &value))
270       return false;
271    return value ? true : false;
272 }
273 
274 static bool has_timeline_syncobj(int fd)
275 {
276    uint64_t value;
277    if (drmGetCap(fd, DRM_CAP_SYNCOBJ_TIMELINE, &value))
278       return false;
279    return value ? true : false;
280 }
281 
282 static bool has_modifiers(int fd)
283 {
284    uint64_t value;
285    if (drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &value))
286       return false;
287    return value ? true : false;
288 }
289 
290 static uint64_t fix_vram_size(uint64_t size)
291 {
292    /* The VRAM size is underreported, so we need to fix it, because
293     * it's used to compute the number of memory modules for harvesting.
294     */
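    /* e.g. a reported size of 4064 MiB is rounded up to the next 256 MiB
     * multiple, 4096 MiB. */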
295    return align64(size, 256 * 1024 * 1024);
296 }
297 
298 static bool
299 has_tmz_support(amdgpu_device_handle dev,
300                 struct radeon_info *info,
301                 struct amdgpu_gpu_info *amdinfo)
302 {
303    struct amdgpu_bo_alloc_request request = {0};
304    int r;
305    amdgpu_bo_handle bo;
306 
307    if (amdinfo->ids_flags & AMDGPU_IDS_FLAGS_TMZ)
308       return true;
309 
310    /* AMDGPU_IDS_FLAGS_TMZ is reported starting from drm_minor 40, so reaching
311     * this point on such kernels means TMZ is disabled. */
311    if (info->drm_minor >= 40)
312       return false;
313 
314    /* Find out ourselves if TMZ is enabled */
315    if (info->chip_class < GFX9)
316       return false;
317 
318    if (info->drm_minor < 36)
319       return false;
320 
321    request.alloc_size = 256;
322    request.phys_alignment = 1024;
323    request.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
324    request.flags = AMDGPU_GEM_CREATE_ENCRYPTED;
325    r = amdgpu_bo_alloc(dev, &request, &bo);
326    if (r)
327       return false;
328    amdgpu_bo_free(bo);
329    return true;
330 }
331 
332 
333 bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
334                        struct amdgpu_gpu_info *amdinfo)
335 {
336    struct drm_amdgpu_info_device device_info = {0};
337    struct amdgpu_buffer_size_alignments alignment_info = {0};
338    struct drm_amdgpu_info_hw_ip dma = {0}, compute = {0}, uvd = {0};
339    struct drm_amdgpu_info_hw_ip uvd_enc = {0}, vce = {0}, vcn_dec = {0}, vcn_jpeg = {0};
340    struct drm_amdgpu_info_hw_ip vcn_enc = {0}, gfx = {0};
341    struct amdgpu_gds_resource_info gds = {0};
342    uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
343    int r, i, j;
344    amdgpu_device_handle dev = dev_p;
345    drmDevicePtr devinfo;
346 
347    /* Get PCI info. */
348    r = drmGetDevice2(fd, 0, &devinfo);
349    if (r) {
350       fprintf(stderr, "amdgpu: drmGetDevice2 failed.\n");
351       return false;
352    }
353    info->pci_domain = devinfo->businfo.pci->domain;
354    info->pci_bus = devinfo->businfo.pci->bus;
355    info->pci_dev = devinfo->businfo.pci->dev;
356    info->pci_func = devinfo->businfo.pci->func;
357    drmFreeDevice(&devinfo);
358 
359    assert(info->drm_major == 3);
360    info->is_amdgpu = true;
361 
362    /* Query hardware and driver information. */
363    r = amdgpu_query_gpu_info(dev, amdinfo);
364    if (r) {
365       fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
366       return false;
367    }
368 
369    r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(device_info), &device_info);
370    if (r) {
371       fprintf(stderr, "amdgpu: amdgpu_query_info(dev_info) failed.\n");
372       return false;
373    }
374 
375    r = amdgpu_query_buffer_size_alignment(dev, &alignment_info);
376    if (r) {
377       fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
378       return false;
379    }
380 
381    r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_DMA, 0, &dma);
382    if (r) {
383       fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
384       return false;
385    }
386 
387    r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &gfx);
388    if (r) {
389       fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(gfx) failed.\n");
390       return false;
391    }
392 
393    r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_COMPUTE, 0, &compute);
394    if (r) {
395       fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(compute) failed.\n");
396       return false;
397    }
398 
399    r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD, 0, &uvd);
400    if (r) {
401       fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
402       return false;
403    }
404 
405    if (info->drm_minor >= 17) {
406       r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD_ENC, 0, &uvd_enc);
407       if (r) {
408          fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd_enc) failed.\n");
409          return false;
410       }
411    }
412 
413    if (info->drm_minor >= 17) {
414       r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_DEC, 0, &vcn_dec);
415       if (r) {
416          fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_dec) failed.\n");
417          return false;
418       }
419    }
420 
421    if (info->drm_minor >= 17) {
422       r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_ENC, 0, &vcn_enc);
423       if (r) {
424          fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_enc) failed.\n");
425          return false;
426       }
427    }
428 
429    if (info->drm_minor >= 27) {
430       r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_JPEG, 0, &vcn_jpeg);
431       if (r) {
432          fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_jpeg) failed.\n");
433          return false;
434       }
435    }
436 
437    r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0, &info->me_fw_version,
438                                      &info->me_fw_feature);
439    if (r) {
440       fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
441       return false;
442    }
443 
444    r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0, &info->pfp_fw_version,
445                                      &info->pfp_fw_feature);
446    if (r) {
447       fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
448       return false;
449    }
450 
451    r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_CE, 0, 0, &info->ce_fw_version,
452                                      &info->ce_fw_feature);
453    if (r) {
454       fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
455       return false;
456    }
457 
458    r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_UVD, 0, 0, &uvd_version, &uvd_feature);
459    if (r) {
460       fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
461       return false;
462    }
463 
464    r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCE, 0, &vce);
465    if (r) {
466       fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
467       return false;
468    }
469 
470    r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_VCE, 0, 0, &vce_version, &vce_feature);
471    if (r) {
472       fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
473       return false;
474    }
475 
476    r = amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &info->address32_hi);
477    if (r) {
478       fprintf(stderr, "amdgpu: amdgpu_query_sw_info(address32_hi) failed.\n");
479       return false;
480    }
481 
482    r = amdgpu_query_gds_info(dev, &gds);
483    if (r) {
484       fprintf(stderr, "amdgpu: amdgpu_query_gds_info failed.\n");
485       return false;
486    }
487 
488    if (info->drm_minor >= 9) {
489       struct drm_amdgpu_memory_info meminfo = {0};
490 
491       r = amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
492       if (r) {
493          fprintf(stderr, "amdgpu: amdgpu_query_info(memory) failed.\n");
494          return false;
495       }
496 
497       /* Note: usable_heap_size values can be random and can't be relied on. */
498       info->gart_size = meminfo.gtt.total_heap_size;
499       info->vram_size = fix_vram_size(meminfo.vram.total_heap_size);
500       info->vram_vis_size = meminfo.cpu_accessible_vram.total_heap_size;
501    } else {
502       /* This is a deprecated interface, which reports usable sizes
503        * (total minus pinned), but the pinned size computation is
504        * buggy, so the values returned from these functions can be
505        * random.
506        */
507       struct amdgpu_heap_info vram, vram_vis, gtt;
508 
509       r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
510       if (r) {
511          fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
512          return false;
513       }
514 
515       r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
516                                  &vram_vis);
517       if (r) {
518          fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
519          return false;
520       }
521 
522       r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
523       if (r) {
524          fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
525          return false;
526       }
527 
528       info->gart_size = gtt.heap_size;
529       info->vram_size = fix_vram_size(vram.heap_size);
530       info->vram_vis_size = vram_vis.heap_size;
531    }
532 
533    info->gart_size_kb = DIV_ROUND_UP(info->gart_size, 1024);
534    info->vram_size_kb = DIV_ROUND_UP(info->vram_size, 1024);
535 
536    if (info->drm_minor >= 41) {
537       r = amdgpu_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_DECODE,
538             sizeof(info->dec_caps), &(info->dec_caps));
539       if (r) {
540          fprintf(stderr, "amdgpu: amdgpu_query_video_caps_info for decode failed.\n");
541          return false;
542       }
543 
544       r = amdgpu_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_ENCODE,
545             sizeof(info->enc_caps), &(info->enc_caps));
546       if (r) {
547          fprintf(stderr, "amdgpu: amdgpu_query_video_caps_info for encode failed.\n");
548          return false;
549       }
550    }
551 
552    /* Add some margin of error, though this shouldn't be needed in theory. */
553    info->all_vram_visible = info->vram_size * 0.9 < info->vram_vis_size;
554 
555    util_cpu_detect();
556 
557    /* Set chip identification. */
558    info->pci_id = amdinfo->asic_id; /* TODO: is this correct? */
559    info->pci_rev_id = amdinfo->pci_rev_id;
560    info->vce_harvest_config = amdinfo->vce_harvest_config;
561 
562 #define identify_chip2(asic, chipname)                                                             \
563    if (ASICREV_IS(amdinfo->chip_external_rev, asic)) {                                             \
564       info->family = CHIP_##chipname;                                                              \
565       info->name = #chipname;                                                                      \
566    }
567 #define identify_chip(chipname) identify_chip2(chipname, chipname)
568 
569    switch (amdinfo->family_id) {
570    case FAMILY_SI:
571       identify_chip(TAHITI);
572       identify_chip(PITCAIRN);
573       identify_chip2(CAPEVERDE, VERDE);
574       identify_chip(OLAND);
575       identify_chip(HAINAN);
576       break;
577    case FAMILY_CI:
578       identify_chip(BONAIRE);
579       identify_chip(HAWAII);
580       break;
581    case FAMILY_KV:
582       identify_chip2(SPECTRE, KAVERI);
583       identify_chip2(SPOOKY, KAVERI);
584       identify_chip2(KALINDI, KABINI);
585       identify_chip2(GODAVARI, KABINI);
586       break;
587    case FAMILY_VI:
588       identify_chip(ICELAND);
589       identify_chip(TONGA);
590       identify_chip(FIJI);
591       identify_chip(POLARIS10);
592       identify_chip(POLARIS11);
593       identify_chip(POLARIS12);
594       identify_chip(VEGAM);
595       break;
596    case FAMILY_CZ:
597       identify_chip(CARRIZO);
598       identify_chip(STONEY);
599       break;
600    case FAMILY_AI:
601       identify_chip(VEGA10);
602       identify_chip(VEGA12);
603       identify_chip(VEGA20);
604       identify_chip(ARCTURUS);
605       identify_chip(ALDEBARAN);
606       break;
607    case FAMILY_RV:
608       identify_chip(RAVEN);
609       identify_chip(RAVEN2);
610       identify_chip(RENOIR);
611       break;
612    case FAMILY_NV:
613       identify_chip(NAVI10);
614       identify_chip(NAVI12);
615       identify_chip(NAVI14);
616       identify_chip(SIENNA_CICHLID);
617       identify_chip(NAVY_FLOUNDER);
618       identify_chip(DIMGREY_CAVEFISH);
619       identify_chip(BEIGE_GOBY);
620       break;
621    case FAMILY_VGH:
622       identify_chip(VANGOGH);
623       break;
624    case FAMILY_YC:
625       identify_chip(YELLOW_CARP);
626       break;
627    }
628 
629    if (!info->name) {
630       fprintf(stderr, "amdgpu: unknown (family_id, chip_external_rev): (%u, %u)\n",
631               amdinfo->family_id, amdinfo->chip_external_rev);
632       return false;
633    }
634 
635    if (info->family >= CHIP_SIENNA_CICHLID)
636       info->chip_class = GFX10_3;
637    else if (info->family >= CHIP_NAVI10)
638       info->chip_class = GFX10;
639    else if (info->family >= CHIP_VEGA10)
640       info->chip_class = GFX9;
641    else if (info->family >= CHIP_TONGA)
642       info->chip_class = GFX8;
643    else if (info->family >= CHIP_BONAIRE)
644       info->chip_class = GFX7;
645    else if (info->family >= CHIP_TAHITI)
646       info->chip_class = GFX6;
647    else {
648       fprintf(stderr, "amdgpu: Unknown family.\n");
649       return false;
650    }
651 
652    info->smart_access_memory = info->all_vram_visible &&
653                                info->chip_class >= GFX10_3 &&
654                                util_get_cpu_caps()->family >= CPU_AMD_ZEN3 &&
655                                util_get_cpu_caps()->family < CPU_AMD_LAST;
656 
657    info->family_id = amdinfo->family_id;
658    info->chip_external_rev = amdinfo->chip_external_rev;
659    info->marketing_name = amdgpu_get_marketing_name(dev);
660    info->is_pro_graphics = info->marketing_name && (strstr(info->marketing_name, "Pro") ||
661                                                     strstr(info->marketing_name, "PRO") ||
662                                                     strstr(info->marketing_name, "Frontier"));
663 
664    /* Set which chips have dedicated VRAM. */
665    info->has_dedicated_vram = !(amdinfo->ids_flags & AMDGPU_IDS_FLAGS_FUSION);
666 
667    /* The kernel can split large buffers in VRAM but not in GTT, so large
668     * allocations can fail or cause buffer movement failures in the kernel.
669     */
670    if (info->has_dedicated_vram)
671       info->max_alloc_size = info->vram_size * 0.8;
672    else
673       info->max_alloc_size = info->gart_size * 0.7;
674 
675    info->vram_type = amdinfo->vram_type;
676    info->vram_bit_width = amdinfo->vram_bit_width;
677    info->ce_ram_size = amdinfo->ce_ram_size;
678 
679    /* Set which chips have uncached device memory. */
680    info->has_l2_uncached = info->chip_class >= GFX9;
681 
682    /* Set hardware information. */
683    info->gds_size = gds.gds_total_size;
684    info->gds_gfx_partition_size = gds.gds_gfx_partition_size;
685    /* convert the shader/memory clocks from KHz to MHz */
686    info->max_shader_clock = amdinfo->max_engine_clk / 1000;
687    info->max_memory_clock = amdinfo->max_memory_clk / 1000;
688    info->max_tcc_blocks = device_info.num_tcc_blocks;
689    info->max_se = amdinfo->num_shader_engines;
690    info->max_sa_per_se = amdinfo->num_shader_arrays_per_engine;
691    info->uvd_fw_version = uvd.available_rings ? uvd_version : 0;
692    info->vce_fw_version = vce.available_rings ? vce_version : 0;
693    info->has_video_hw.uvd_decode = uvd.available_rings != 0;
694    info->has_video_hw.vcn_decode = vcn_dec.available_rings != 0;
695    info->has_video_hw.jpeg_decode = vcn_jpeg.available_rings != 0;
696    info->has_video_hw.vce_encode = vce.available_rings != 0;
697    info->has_video_hw.uvd_encode = uvd_enc.available_rings != 0;
698    info->has_video_hw.vcn_encode = vcn_enc.available_rings != 0;
699    info->has_userptr = true;
700    info->has_syncobj = has_syncobj(fd);
701    info->has_timeline_syncobj = has_timeline_syncobj(fd);
702    info->has_fence_to_handle = info->has_syncobj && info->drm_minor >= 21;
703    info->has_local_buffers = info->drm_minor >= 20;
704    info->kernel_flushes_hdp_before_ib = true;
705    info->htile_cmask_support_1d_tiling = true;
706    info->si_TA_CS_BC_BASE_ADDR_allowed = true;
707    info->has_bo_metadata = true;
708    info->has_gpu_reset_status_query = true;
709    info->has_eqaa_surface_allocator = true;
710    info->has_format_bc1_through_bc7 = true;
711    /* DRM 3.1.0 doesn't flush TC for GFX8 correctly. */
712    info->kernel_flushes_tc_l2_after_ib = info->chip_class != GFX8 || info->drm_minor >= 2;
713    info->has_indirect_compute_dispatch = true;
714    /* GFX6 doesn't support unaligned loads. */
715    info->has_unaligned_shader_loads = info->chip_class != GFX6;
716    /* Disable sparse mappings on GFX6 due to VM faults in CP DMA. Enable them once
717     * these faults are mitigated in software.
718     */
719    info->has_sparse_vm_mappings = info->chip_class >= GFX7 && info->drm_minor >= 13;
720    info->has_2d_tiling = true;
721    info->has_read_registers_query = true;
722    info->has_scheduled_fence_dependency = info->drm_minor >= 28;
723    info->mid_command_buffer_preemption_enabled = amdinfo->ids_flags & AMDGPU_IDS_FLAGS_PREEMPTION;
724    info->has_tmz_support = has_tmz_support(dev, info, amdinfo);
725    info->kernel_has_modifiers = has_modifiers(fd);
726    info->has_graphics = gfx.available_rings > 0;
727 
728    info->pa_sc_tile_steering_override = device_info.pa_sc_tile_steering_override;
729    info->max_render_backends = amdinfo->rb_pipes;
730    /* The value returned by the kernel driver was wrong. */
731    if (info->family == CHIP_KAVERI)
732       info->max_render_backends = 2;
733 
734    /* Guess the number of enabled SEs because the kernel doesn't tell us. */
735    if (info->chip_class >= GFX10_3 && info->max_se > 1) {
736       unsigned num_rbs_per_se = info->max_render_backends / info->max_se;
737       info->num_se = util_bitcount(amdinfo->enabled_rb_pipes_mask) / num_rbs_per_se;
738    } else {
739       info->num_se = info->max_se;
740    }
741 
742    info->clock_crystal_freq = amdinfo->gpu_counter_freq;
743    if (!info->clock_crystal_freq) {
744       fprintf(stderr, "amdgpu: clock crystal frequency is 0, timestamps will be wrong\n");
745       info->clock_crystal_freq = 1;
746    }
747    if (info->chip_class >= GFX10) {
748       info->tcc_cache_line_size = 128;
749 
750       if (info->drm_minor >= 35) {
751          info->num_tcc_blocks = info->max_tcc_blocks - util_bitcount64(device_info.tcc_disabled_mask);
752       } else {
753          /* This is a hack, but it's all we can do without a kernel upgrade. */
754          info->num_tcc_blocks = info->vram_size / (512 * 1024 * 1024);
755          if (info->num_tcc_blocks > info->max_tcc_blocks)
756             info->num_tcc_blocks /= 2;
757       }
758    } else {
759       if (!info->has_graphics && info->family >= CHIP_ALDEBARAN)
760          info->tcc_cache_line_size = 128;
761       else
762          info->tcc_cache_line_size = 64;
763 
764       info->num_tcc_blocks = info->max_tcc_blocks;
765    }
766 
767    info->tcc_rb_non_coherent = !util_is_power_of_two_or_zero(info->num_tcc_blocks);
768 
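   /* Derive the total L2 (TCC) cache size: each TCC block is 64, 128 or
    * 256 KiB depending on the chip family handled in the switch below. */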
769    switch (info->family) {
770    case CHIP_TAHITI:
771    case CHIP_PITCAIRN:
772    case CHIP_OLAND:
773    case CHIP_HAWAII:
774    case CHIP_KABINI:
775    case CHIP_TONGA:
776    case CHIP_STONEY:
777    case CHIP_RAVEN2:
778       info->l2_cache_size = info->num_tcc_blocks * 64 * 1024;
779       break;
780    case CHIP_VERDE:
781    case CHIP_HAINAN:
782    case CHIP_BONAIRE:
783    case CHIP_KAVERI:
784    case CHIP_ICELAND:
785    case CHIP_CARRIZO:
786    case CHIP_FIJI:
787    case CHIP_POLARIS12:
788    case CHIP_VEGAM:
789       info->l2_cache_size = info->num_tcc_blocks * 128 * 1024;
790       break;
791    default:
792       info->l2_cache_size = info->num_tcc_blocks * 256 * 1024;
793       break;
794    }
795 
796    info->l1_cache_size = 16384;
797 
798    info->mc_arb_ramcfg = amdinfo->mc_arb_ramcfg;
799    info->gb_addr_config = amdinfo->gb_addr_cfg;
800    if (info->chip_class >= GFX9) {
801       info->num_tile_pipes = 1 << G_0098F8_NUM_PIPES(amdinfo->gb_addr_cfg);
802       info->pipe_interleave_bytes = 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(amdinfo->gb_addr_cfg);
803    } else {
804       info->num_tile_pipes = cik_get_num_tile_pipes(amdinfo);
805       info->pipe_interleave_bytes = 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(amdinfo->gb_addr_cfg);
806    }
807    info->r600_has_virtual_memory = true;
808 
809    /* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
810     * 16KB makes some SIMDs unoccupied).
811     *
812     * On GFX10+, LDS is 128KB in WGP mode and 64KB in CU mode. Assume WGP mode is used.
813     */
814    info->lds_size_per_workgroup = info->chip_class >= GFX10 ? 128 * 1024 : 64 * 1024;
815    /* lds_encode_granularity is the block size used for encoding registers.
816     * lds_alloc_granularity is what the hardware will align the LDS size to.
817     */
818    info->lds_encode_granularity = info->chip_class >= GFX7 ? 128 * 4 : 64 * 4;
819    info->lds_alloc_granularity = info->chip_class >= GFX10_3 ? 256 * 4 : info->lds_encode_granularity;
820 
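   /* available_rings is a bitmask of usable rings per IP block; the asserts
    * below check that the enabled rings are contiguous starting at bit 0,
    * i.e. that mask + 1 is a power of two. */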
821    assert(util_is_power_of_two_or_zero(dma.available_rings + 1));
822    assert(util_is_power_of_two_or_zero(compute.available_rings + 1));
823 
824    info->num_rings[RING_GFX] = util_bitcount(gfx.available_rings);
825    info->num_rings[RING_COMPUTE] = util_bitcount(compute.available_rings);
826    info->num_rings[RING_DMA] = util_bitcount(dma.available_rings);
827    info->num_rings[RING_UVD] = util_bitcount(uvd.available_rings);
828    info->num_rings[RING_VCE] = util_bitcount(vce.available_rings);
829    info->num_rings[RING_UVD_ENC] = util_bitcount(uvd_enc.available_rings);
830    info->num_rings[RING_VCN_DEC] = util_bitcount(vcn_dec.available_rings);
831    info->num_rings[RING_VCN_ENC] = util_bitcount(vcn_enc.available_rings);
832    info->num_rings[RING_VCN_JPEG] = util_bitcount(vcn_jpeg.available_rings);
833 
834    /* This is "align_mask" copied from the kernel, maximums of all IP versions. */
835    info->ib_pad_dw_mask[RING_GFX] = 0xff;
836    info->ib_pad_dw_mask[RING_COMPUTE] = 0xff;
837    info->ib_pad_dw_mask[RING_DMA] = 0xf;
838    info->ib_pad_dw_mask[RING_UVD] = 0xf;
839    info->ib_pad_dw_mask[RING_VCE] = 0x3f;
840    info->ib_pad_dw_mask[RING_UVD_ENC] = 0x3f;
841    info->ib_pad_dw_mask[RING_VCN_DEC] = 0xf;
842    info->ib_pad_dw_mask[RING_VCN_ENC] = 0x3f;
843    info->ib_pad_dw_mask[RING_VCN_JPEG] = 0xf;
844 
845    /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs on
846     * GFX6, and some CLEAR_STATE register writes (e.g. SPI_VS_OUT_CONFIG)
847     * hang the ASIC on the radeon kernel driver, so only enable GFX7+ CLEAR_STATE on the amdgpu kernel.
848     */
849    info->has_clear_state = info->chip_class >= GFX7;
850 
851    info->has_distributed_tess =
852       info->chip_class >= GFX10 || (info->chip_class >= GFX8 && info->max_se >= 2);
853 
854    info->has_dcc_constant_encode =
855       info->family == CHIP_RAVEN2 || info->family == CHIP_RENOIR || info->chip_class >= GFX10;
856 
857    info->has_rbplus = info->family == CHIP_STONEY || info->chip_class >= GFX9;
858 
859    /* Some chips have RB+ registers, but don't support RB+. Those must
860     * always disable it.
861     */
862    info->rbplus_allowed =
863       info->has_rbplus &&
864       (info->family == CHIP_STONEY || info->family == CHIP_VEGA12 || info->family == CHIP_RAVEN ||
865        info->family == CHIP_RAVEN2 || info->family == CHIP_RENOIR || info->chip_class >= GFX10_3);
866 
867    info->has_out_of_order_rast =
868       info->chip_class >= GFX8 && info->chip_class <= GFX9 && info->max_se >= 2;
869 
870    /* Whether chips support double rate packed math instructions. */
871    info->has_packed_math_16bit = info->chip_class >= GFX9;
872 
873    /* Whether chips support dot product instructions. A subset of these support a smaller
874     * instruction encoding which accumulates with the destination.
875     */
876    info->has_accelerated_dot_product =
877       info->family == CHIP_ARCTURUS || info->family == CHIP_ALDEBARAN ||
878       info->family == CHIP_VEGA20 || info->family >= CHIP_NAVI12;
879 
880    /* TODO: Figure out how to use LOAD_CONTEXT_REG on GFX6-GFX7. */
881    info->has_load_ctx_reg_pkt =
882       info->chip_class >= GFX9 || (info->chip_class >= GFX8 && info->me_fw_feature >= 41);
883 
884    info->cpdma_prefetch_writes_memory = info->chip_class <= GFX8;
885 
886    info->has_gfx9_scissor_bug = info->family == CHIP_VEGA10 || info->family == CHIP_RAVEN;
887 
888    info->has_tc_compat_zrange_bug = info->chip_class >= GFX8 && info->chip_class <= GFX9;
889 
890    info->has_msaa_sample_loc_bug =
891       (info->family >= CHIP_POLARIS10 && info->family <= CHIP_POLARIS12) ||
892       info->family == CHIP_VEGA10 || info->family == CHIP_RAVEN;
893 
894    info->has_ls_vgpr_init_bug = info->family == CHIP_VEGA10 || info->family == CHIP_RAVEN;
895 
896    /* Drawing from 0-sized index buffers causes hangs on gfx10. */
897    info->has_zero_index_buffer_bug = info->chip_class == GFX10;
898 
899    /* Whether chips are affected by the image load/sample/gather hw bug when
900     * DCC is enabled (ie. WRITE_COMPRESS_ENABLE should be 0).
901     */
902    info->has_image_load_dcc_bug = info->family == CHIP_DIMGREY_CAVEFISH ||
903                                   info->family == CHIP_VANGOGH ||
904                                   info->family == CHIP_YELLOW_CARP;
905 
906    /* DB has a bug when ITERATE_256 is set to 1 that can cause a hang. The
907     * workaround is to set DECOMPRESS_ON_Z_PLANES to 2 for 4X MSAA D/S images.
908     */
909    info->has_two_planes_iterate256_bug = info->chip_class == GFX10;
910 
911    /* GFX10+Sienna: NGG->legacy transitions require VGT_FLUSH. */
912    info->has_vgt_flush_ngg_legacy_bug = info->chip_class == GFX10 ||
913                                         info->family == CHIP_SIENNA_CICHLID;
914 
915    /* HW bug workaround when CS threadgroups > 256 threads and async compute
916     * isn't used, i.e. only one compute job can run at a time.  If async
917     * compute is possible, the threadgroup size must be limited to 256 threads
918     * on all queues to avoid the bug.
919     * Only GFX6 and certain GFX7 chips are affected.
920     *
921     * FIXME: RADV doesn't limit the number of threads for async compute.
922     */
923    info->has_cs_regalloc_hang_bug = info->chip_class == GFX6 ||
924                                     info->family == CHIP_BONAIRE ||
925                                     info->family == CHIP_KABINI;
926 
927    /* Support for GFX10.3 was added with F32_ME_FEATURE_VERSION_31 but the
928     * feature version wasn't bumped.
929     */
930    info->has_32bit_predication = (info->chip_class >= GFX10 &&
931                                   info->me_fw_feature >= 32) ||
932                                  (info->chip_class == GFX9 &&
933                                   info->me_fw_feature >= 52);
934 
935    /* Get the number of good compute units. */
936    info->num_good_compute_units = 0;
937    for (i = 0; i < info->max_se; i++) {
938       for (j = 0; j < info->max_sa_per_se; j++) {
939          /*
940           * The CU bitmap in the amdgpu info structure is a 4x4 array,
941           * which matches Vega ASICs and their 4*2 SE/SH layout.
942           * Arcturus changed the SE/SH layout to 8*1.
943           * To keep the impact on this code small, the extra SEs are
944           * folded into the existing 4x4 bitmap array
945           * as follows:
946           *    SE4,SH0 --> cu_bitmap[0][1]
947           *    SE5,SH0 --> cu_bitmap[1][1]
948           *    SE6,SH0 --> cu_bitmap[2][1]
949           *    SE7,SH0 --> cu_bitmap[3][1]
950           */
951          info->cu_mask[i % 4][j + i / 4] = amdinfo->cu_bitmap[i % 4][j + i / 4];
952          info->num_good_compute_units += util_bitcount(info->cu_mask[i][j]);
953       }
954    }
955 
956    /* On GFX10, only whole WGPs (in units of 2 CUs) can be disabled,
957     * and max - min <= 2.
958     */
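   /* Illustrative (made-up) numbers: 40 good CUs over 2 SEs with 2 SAs each on
    * GFX10 gives a divisor of 2 * 2 * 2 = 8, so both values below evaluate to
    * (40 / 8) * 2 = 10 CUs per SA. */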
959    unsigned cu_group = info->chip_class >= GFX10 ? 2 : 1;
960    info->max_good_cu_per_sa =
961       DIV_ROUND_UP(info->num_good_compute_units, (info->num_se * info->max_sa_per_se * cu_group)) *
962       cu_group;
963    info->min_good_cu_per_sa =
964       (info->num_good_compute_units / (info->num_se * info->max_sa_per_se * cu_group)) * cu_group;
965 
966    memcpy(info->si_tile_mode_array, amdinfo->gb_tile_mode, sizeof(amdinfo->gb_tile_mode));
967    info->enabled_rb_mask = amdinfo->enabled_rb_pipes_mask;
968 
969    memcpy(info->cik_macrotile_mode_array, amdinfo->gb_macro_tile_mode,
970           sizeof(amdinfo->gb_macro_tile_mode));
971 
972    info->pte_fragment_size = alignment_info.size_local;
973    info->gart_page_size = alignment_info.size_remote;
974 
975    if (info->chip_class == GFX6)
976       info->gfx_ib_pad_with_type2 = true;
977 
978    unsigned ib_align = 0;
979    ib_align = MAX2(ib_align, gfx.ib_start_alignment);
980    ib_align = MAX2(ib_align, gfx.ib_size_alignment);
981    ib_align = MAX2(ib_align, compute.ib_start_alignment);
982    ib_align = MAX2(ib_align, compute.ib_size_alignment);
983    ib_align = MAX2(ib_align, dma.ib_start_alignment);
984    ib_align = MAX2(ib_align, dma.ib_size_alignment);
985    ib_align = MAX2(ib_align, uvd.ib_start_alignment);
986    ib_align = MAX2(ib_align, uvd.ib_size_alignment);
987    ib_align = MAX2(ib_align, uvd_enc.ib_start_alignment);
988    ib_align = MAX2(ib_align, uvd_enc.ib_size_alignment);
989    ib_align = MAX2(ib_align, vce.ib_start_alignment);
990    ib_align = MAX2(ib_align, vce.ib_size_alignment);
991    ib_align = MAX2(ib_align, vcn_dec.ib_start_alignment);
992    ib_align = MAX2(ib_align, vcn_dec.ib_size_alignment);
993    ib_align = MAX2(ib_align, vcn_enc.ib_start_alignment);
994    ib_align = MAX2(ib_align, vcn_enc.ib_size_alignment);
995    ib_align = MAX2(ib_align, vcn_jpeg.ib_start_alignment);
996    ib_align = MAX2(ib_align, vcn_jpeg.ib_size_alignment);
997    /* GFX10 and maybe GFX9 need this alignment for cache coherency. */
998    if (info->chip_class >= GFX9)
999       ib_align = MAX2(ib_align, info->tcc_cache_line_size);
1000    /* The kernel pads gfx and compute IBs to 256 dwords since:
1001     *   66f3b2d527154bd258a57c8815004b5964aa1cf5
1002     * Do the same.
1003     */
1004    ib_align = MAX2(ib_align, 1024);
1005    info->ib_alignment = ib_align;
1006 
1007    if ((info->drm_minor >= 31 && (info->family == CHIP_RAVEN || info->family == CHIP_RAVEN2 ||
1008                                   info->family == CHIP_RENOIR)) ||
1009        (info->drm_minor >= 34 && (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14)) ||
1010        info->chip_class >= GFX10_3) {
1011       if (info->max_render_backends == 1)
1012          info->use_display_dcc_unaligned = true;
1013       else
1014          info->use_display_dcc_with_retile_blit = true;
1015    }
1016 
1017    info->has_gds_ordered_append = info->chip_class >= GFX7 && info->drm_minor >= 29;
1018 
1019    if (info->chip_class >= GFX9 && info->has_graphics) {
1020       unsigned pc_lines = 0;
1021 
1022       switch (info->family) {
1023       case CHIP_VEGA10:
1024       case CHIP_VEGA12:
1025       case CHIP_VEGA20:
1026          pc_lines = 2048;
1027          break;
1028       case CHIP_RAVEN:
1029       case CHIP_RAVEN2:
1030       case CHIP_RENOIR:
1031       case CHIP_NAVI10:
1032       case CHIP_NAVI12:
1033       case CHIP_SIENNA_CICHLID:
1034       case CHIP_NAVY_FLOUNDER:
1035       case CHIP_DIMGREY_CAVEFISH:
1036          pc_lines = 1024;
1037          break;
1038       case CHIP_NAVI14:
1039       case CHIP_BEIGE_GOBY:
1040          pc_lines = 512;
1041          break;
1042       case CHIP_VANGOGH:
1043       case CHIP_YELLOW_CARP:
1044          pc_lines = 256;
1045          break;
1046       default:
1047          assert(0);
1048       }
1049 
1050       info->pc_lines = pc_lines;
1051 
1052       if (info->chip_class >= GFX10) {
1053          info->pbb_max_alloc_count = pc_lines / 3;
1054       } else {
1055          info->pbb_max_alloc_count = MIN2(128, pc_lines / (4 * info->max_se));
1056       }
1057    }
1058 
1059    if (info->chip_class >= GFX10_3)
1060       info->max_wave64_per_simd = 16;
1061    else if (info->chip_class == GFX10)
1062       info->max_wave64_per_simd = 20;
1063    else if (info->family >= CHIP_POLARIS10 && info->family <= CHIP_VEGAM)
1064       info->max_wave64_per_simd = 8;
1065    else
1066       info->max_wave64_per_simd = 10;
1067 
1068    if (info->chip_class >= GFX10) {
1069       info->num_physical_sgprs_per_simd = 128 * info->max_wave64_per_simd;
1070       info->min_sgpr_alloc = 128;
1071       info->sgpr_alloc_granularity = 128;
1072    } else if (info->chip_class >= GFX8) {
1073       info->num_physical_sgprs_per_simd = 800;
1074       info->min_sgpr_alloc = 16;
1075       info->sgpr_alloc_granularity = 16;
1076    } else {
1077       info->num_physical_sgprs_per_simd = 512;
1078       info->min_sgpr_alloc = 8;
1079       info->sgpr_alloc_granularity = 8;
1080    }
1081 
1082    info->has_3d_cube_border_color_mipmap = info->has_graphics || info->family == CHIP_ARCTURUS;
1083    info->max_sgpr_alloc = info->family == CHIP_TONGA || info->family == CHIP_ICELAND ? 96 : 104;
1084 
1085    if (!info->has_graphics && info->family >= CHIP_ALDEBARAN) {
1086       info->min_wave64_vgpr_alloc = 8;
1087       info->max_vgpr_alloc = 512;
1088       info->wave64_vgpr_alloc_granularity = 8;
1089    } else {
1090       info->min_wave64_vgpr_alloc = 4;
1091       info->max_vgpr_alloc = 256;
1092       info->wave64_vgpr_alloc_granularity = 4;
1093    }
1094 
1095    info->num_physical_wave64_vgprs_per_simd = info->chip_class >= GFX10 ? 512 : 256;
1096    info->num_simd_per_compute_unit = info->chip_class >= GFX10 ? 2 : 4;
1097 
1098    return true;
1099 }
1100 
1101 void ac_compute_driver_uuid(char *uuid, size_t size)
1102 {
1103    char amd_uuid[] = "AMD-MESA-DRV";
1104 
1105    assert(size >= sizeof(amd_uuid));
1106 
1107    memset(uuid, 0, size);
1108    strncpy(uuid, amd_uuid, size);
1109 }
1110 
1111 void ac_compute_device_uuid(struct radeon_info *info, char *uuid, size_t size)
1112 {
1113    uint32_t *uint_uuid = (uint32_t *)uuid;
1114 
1115    assert(size >= sizeof(uint32_t) * 4);
1116 
1117    /**
1118     * Use the device info directly instead of using a sha1. GL/VK UUIDs
1119     * are 16 byte vs 20 byte for sha1, and the truncation that would be
1120     * required would get rid of part of the little entropy we have.
1121     */
1122    memset(uuid, 0, size);
1123    uint_uuid[0] = info->pci_domain;
1124    uint_uuid[1] = info->pci_bus;
1125    uint_uuid[2] = info->pci_dev;
1126    uint_uuid[3] = info->pci_func;
1127 }
1128 
1129 void ac_print_gpu_info(struct radeon_info *info, FILE *f)
1130 {
1131    fprintf(f, "Device info:\n");
1132    fprintf(f, "    pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n", info->pci_domain, info->pci_bus,
1133            info->pci_dev, info->pci_func);
1134 
1135    fprintf(f, "    name = %s\n", info->name);
1136    fprintf(f, "    marketing_name = %s\n", info->marketing_name);
1137    fprintf(f, "    is_pro_graphics = %u\n", info->is_pro_graphics);
1138    fprintf(f, "    pci_id = 0x%x\n", info->pci_id);
1139    fprintf(f, "    pci_rev_id = 0x%x\n", info->pci_rev_id);
1140    fprintf(f, "    family = %i\n", info->family);
1141    fprintf(f, "    chip_class = %i\n", info->chip_class);
1142    fprintf(f, "    family_id = %i\n", info->family_id);
1143    fprintf(f, "    chip_external_rev = %i\n", info->chip_external_rev);
1144    fprintf(f, "    clock_crystal_freq = %i\n", info->clock_crystal_freq);
1145 
1146    fprintf(f, "Features:\n");
1147    fprintf(f, "    has_graphics = %i\n", info->has_graphics);
1148    fprintf(f, "    num_rings[RING_GFX] = %i\n", info->num_rings[RING_GFX]);
1149    fprintf(f, "    num_rings[RING_DMA] = %i\n", info->num_rings[RING_DMA]);
1150    fprintf(f, "    num_rings[RING_COMPUTE] = %u\n", info->num_rings[RING_COMPUTE]);
1151    fprintf(f, "    num_rings[RING_UVD] = %i\n", info->num_rings[RING_UVD]);
1152    fprintf(f, "    num_rings[RING_VCE] = %i\n", info->num_rings[RING_VCE]);
1153    fprintf(f, "    num_rings[RING_UVD_ENC] = %i\n", info->num_rings[RING_UVD_ENC]);
1154    fprintf(f, "    num_rings[RING_VCN_DEC] = %i\n", info->num_rings[RING_VCN_DEC]);
1155    fprintf(f, "    num_rings[RING_VCN_ENC] = %i\n", info->num_rings[RING_VCN_ENC]);
1156    fprintf(f, "    num_rings[RING_VCN_JPEG] = %i\n", info->num_rings[RING_VCN_JPEG]);
1157    fprintf(f, "    has_clear_state = %u\n", info->has_clear_state);
1158    fprintf(f, "    has_distributed_tess = %u\n", info->has_distributed_tess);
1159    fprintf(f, "    has_dcc_constant_encode = %u\n", info->has_dcc_constant_encode);
1160    fprintf(f, "    has_rbplus = %u\n", info->has_rbplus);
1161    fprintf(f, "    rbplus_allowed = %u\n", info->rbplus_allowed);
1162    fprintf(f, "    has_load_ctx_reg_pkt = %u\n", info->has_load_ctx_reg_pkt);
1163    fprintf(f, "    has_out_of_order_rast = %u\n", info->has_out_of_order_rast);
1164    fprintf(f, "    cpdma_prefetch_writes_memory = %u\n", info->cpdma_prefetch_writes_memory);
1165    fprintf(f, "    has_gfx9_scissor_bug = %i\n", info->has_gfx9_scissor_bug);
1166    fprintf(f, "    has_tc_compat_zrange_bug = %i\n", info->has_tc_compat_zrange_bug);
1167    fprintf(f, "    has_msaa_sample_loc_bug = %i\n", info->has_msaa_sample_loc_bug);
1168    fprintf(f, "    has_ls_vgpr_init_bug = %i\n", info->has_ls_vgpr_init_bug);
1169    fprintf(f, "    has_32bit_predication = %i\n", info->has_32bit_predication);
1170    fprintf(f, "    has_3d_cube_border_color_mipmap = %i\n", info->has_3d_cube_border_color_mipmap);
1171 
1172    fprintf(f, "Display features:\n");
1173    fprintf(f, "    use_display_dcc_unaligned = %u\n", info->use_display_dcc_unaligned);
1174    fprintf(f, "    use_display_dcc_with_retile_blit = %u\n", info->use_display_dcc_with_retile_blit);
1175 
1176    fprintf(f, "Memory info:\n");
1177    fprintf(f, "    pte_fragment_size = %u\n", info->pte_fragment_size);
1178    fprintf(f, "    gart_page_size = %u\n", info->gart_page_size);
1179    fprintf(f, "    gart_size = %i MB\n", (int)DIV_ROUND_UP(info->gart_size, 1024 * 1024));
1180    fprintf(f, "    vram_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_size, 1024 * 1024));
1181    fprintf(f, "    vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_vis_size, 1024 * 1024));
1182    fprintf(f, "    vram_type = %i\n", info->vram_type);
1183    fprintf(f, "    vram_bit_width = %i\n", info->vram_bit_width);
1184    fprintf(f, "    gds_size = %u kB\n", info->gds_size / 1024);
1185    fprintf(f, "    gds_gfx_partition_size = %u kB\n", info->gds_gfx_partition_size / 1024);
1186    fprintf(f, "    max_alloc_size = %i MB\n", (int)DIV_ROUND_UP(info->max_alloc_size, 1024 * 1024));
1187    fprintf(f, "    min_alloc_size = %u\n", info->min_alloc_size);
1188    fprintf(f, "    address32_hi = %u\n", info->address32_hi);
1189    fprintf(f, "    has_dedicated_vram = %u\n", info->has_dedicated_vram);
1190    fprintf(f, "    all_vram_visible = %u\n", info->all_vram_visible);
1191    fprintf(f, "    smart_access_memory = %u\n", info->smart_access_memory);
1192    fprintf(f, "    max_tcc_blocks = %i\n", info->max_tcc_blocks);
1193    fprintf(f, "    num_tcc_blocks = %i\n", info->num_tcc_blocks);
1194    fprintf(f, "    tcc_cache_line_size = %u\n", info->tcc_cache_line_size);
1195    fprintf(f, "    tcc_rb_non_coherent = %u\n", info->tcc_rb_non_coherent);
1196    fprintf(f, "    pc_lines = %u\n", info->pc_lines);
1197    fprintf(f, "    lds_size_per_workgroup = %u\n", info->lds_size_per_workgroup);
1198    fprintf(f, "    lds_alloc_granularity = %i\n", info->lds_alloc_granularity);
1199    fprintf(f, "    lds_encode_granularity = %i\n", info->lds_encode_granularity);
1200    fprintf(f, "    max_memory_clock = %i\n", info->max_memory_clock);
1201    fprintf(f, "    ce_ram_size = %i\n", info->ce_ram_size);
1202    fprintf(f, "    l1_cache_size = %i\n", info->l1_cache_size);
1203    fprintf(f, "    l2_cache_size = %i\n", info->l2_cache_size);
1204 
1205    fprintf(f, "CP info:\n");
1206    fprintf(f, "    gfx_ib_pad_with_type2 = %i\n", info->gfx_ib_pad_with_type2);
1207    fprintf(f, "    ib_alignment = %u\n", info->ib_alignment);
1208    fprintf(f, "    me_fw_version = %i\n", info->me_fw_version);
1209    fprintf(f, "    me_fw_feature = %i\n", info->me_fw_feature);
1210    fprintf(f, "    pfp_fw_version = %i\n", info->pfp_fw_version);
1211    fprintf(f, "    pfp_fw_feature = %i\n", info->pfp_fw_feature);
1212    fprintf(f, "    ce_fw_version = %i\n", info->ce_fw_version);
1213    fprintf(f, "    ce_fw_feature = %i\n", info->ce_fw_feature);
1214 
1215    fprintf(f, "Multimedia info:\n");
1216    fprintf(f, "    uvd_decode = %u\n", info->has_video_hw.uvd_decode);
1217    fprintf(f, "    vcn_decode = %u\n", info->has_video_hw.vcn_decode);
1218    fprintf(f, "    jpeg_decode = %u\n", info->has_video_hw.jpeg_decode);
1219    fprintf(f, "    vce_encode = %u\n", info->has_video_hw.vce_encode);
1220    fprintf(f, "    uvd_encode = %u\n", info->has_video_hw.uvd_encode);
1221    fprintf(f, "    vcn_encode = %u\n", info->has_video_hw.vcn_encode);
1222    fprintf(f, "    uvd_fw_version = %u\n", info->uvd_fw_version);
1223    fprintf(f, "    vce_fw_version = %u\n", info->vce_fw_version);
1224    fprintf(f, "    vce_harvest_config = %i\n", info->vce_harvest_config);
1225 
1226    fprintf(f, "Kernel & winsys capabilities:\n");
1227    fprintf(f, "    drm = %i.%i.%i\n", info->drm_major, info->drm_minor, info->drm_patchlevel);
1228    fprintf(f, "    has_userptr = %i\n", info->has_userptr);
   fprintf(f, "    has_syncobj = %u\n", info->has_syncobj);
   fprintf(f, "    has_timeline_syncobj = %u\n", info->has_timeline_syncobj);
   fprintf(f, "    has_fence_to_handle = %u\n", info->has_fence_to_handle);
   fprintf(f, "    has_local_buffers = %u\n", info->has_local_buffers);
   fprintf(f, "    kernel_flushes_hdp_before_ib = %u\n", info->kernel_flushes_hdp_before_ib);
   fprintf(f, "    htile_cmask_support_1d_tiling = %u\n", info->htile_cmask_support_1d_tiling);
   fprintf(f, "    si_TA_CS_BC_BASE_ADDR_allowed = %u\n", info->si_TA_CS_BC_BASE_ADDR_allowed);
   fprintf(f, "    has_bo_metadata = %u\n", info->has_bo_metadata);
   fprintf(f, "    has_gpu_reset_status_query = %u\n", info->has_gpu_reset_status_query);
   fprintf(f, "    has_eqaa_surface_allocator = %u\n", info->has_eqaa_surface_allocator);
   fprintf(f, "    has_format_bc1_through_bc7 = %u\n", info->has_format_bc1_through_bc7);
   fprintf(f, "    kernel_flushes_tc_l2_after_ib = %u\n", info->kernel_flushes_tc_l2_after_ib);
   fprintf(f, "    has_indirect_compute_dispatch = %u\n", info->has_indirect_compute_dispatch);
   fprintf(f, "    has_unaligned_shader_loads = %u\n", info->has_unaligned_shader_loads);
   fprintf(f, "    has_sparse_vm_mappings = %u\n", info->has_sparse_vm_mappings);
   fprintf(f, "    has_2d_tiling = %u\n", info->has_2d_tiling);
   fprintf(f, "    has_read_registers_query = %u\n", info->has_read_registers_query);
   fprintf(f, "    has_gds_ordered_append = %u\n", info->has_gds_ordered_append);
   fprintf(f, "    has_scheduled_fence_dependency = %u\n", info->has_scheduled_fence_dependency);
   fprintf(f, "    mid_command_buffer_preemption_enabled = %u\n",
           info->mid_command_buffer_preemption_enabled);
   fprintf(f, "    has_tmz_support = %u\n", info->has_tmz_support);

   fprintf(f, "Shader core info:\n");
   fprintf(f, "    max_shader_clock = %i\n", info->max_shader_clock);
   fprintf(f, "    num_good_compute_units = %i\n", info->num_good_compute_units);
   fprintf(f, "    max_good_cu_per_sa = %i\n", info->max_good_cu_per_sa);
   fprintf(f, "    min_good_cu_per_sa = %i\n", info->min_good_cu_per_sa);
   fprintf(f, "    max_se = %i\n", info->max_se);
   fprintf(f, "    num_se = %i\n", info->num_se);
   fprintf(f, "    max_sa_per_se = %i\n", info->max_sa_per_se);
   fprintf(f, "    max_wave64_per_simd = %i\n", info->max_wave64_per_simd);
   fprintf(f, "    num_physical_sgprs_per_simd = %i\n", info->num_physical_sgprs_per_simd);
   fprintf(f, "    num_physical_wave64_vgprs_per_simd = %i\n",
           info->num_physical_wave64_vgprs_per_simd);
   fprintf(f, "    num_simd_per_compute_unit = %i\n", info->num_simd_per_compute_unit);
   fprintf(f, "    min_sgpr_alloc = %i\n", info->min_sgpr_alloc);
   fprintf(f, "    max_sgpr_alloc = %i\n", info->max_sgpr_alloc);
   fprintf(f, "    sgpr_alloc_granularity = %i\n", info->sgpr_alloc_granularity);
   fprintf(f, "    min_wave64_vgpr_alloc = %i\n", info->min_wave64_vgpr_alloc);
   fprintf(f, "    max_vgpr_alloc = %i\n", info->max_vgpr_alloc);
   fprintf(f, "    wave64_vgpr_alloc_granularity = %i\n", info->wave64_vgpr_alloc_granularity);

   fprintf(f, "Render backend info:\n");
   fprintf(f, "    pa_sc_tile_steering_override = 0x%x\n", info->pa_sc_tile_steering_override);
   fprintf(f, "    max_render_backends = %i\n", info->max_render_backends);
   fprintf(f, "    num_tile_pipes = %i\n", info->num_tile_pipes);
   fprintf(f, "    pipe_interleave_bytes = %i\n", info->pipe_interleave_bytes);
   fprintf(f, "    enabled_rb_mask = 0x%x\n", info->enabled_rb_mask);
   fprintf(f, "    max_alignment = %u\n", (unsigned)info->max_alignment);
   fprintf(f, "    pbb_max_alloc_count = %u\n", info->pbb_max_alloc_count);

   fprintf(f, "GB_ADDR_CONFIG: 0x%08x\n", info->gb_addr_config);
   if (info->chip_class >= GFX10) {
      fprintf(f, "    num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
      fprintf(f, "    pipe_interleave_size = %u\n",
              256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
      fprintf(f, "    max_compressed_frags = %u\n",
              1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
      if (info->chip_class >= GFX10_3)
         fprintf(f, "    num_pkrs = %u\n", 1 << G_0098F8_NUM_PKRS(info->gb_addr_config));
   } else if (info->chip_class == GFX9) {
      fprintf(f, "    num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
      fprintf(f, "    pipe_interleave_size = %u\n",
              256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
      fprintf(f, "    max_compressed_frags = %u\n",
              1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
      fprintf(f, "    bank_interleave_size = %u\n",
              1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
      fprintf(f, "    num_banks = %u\n", 1 << G_0098F8_NUM_BANKS(info->gb_addr_config));
      fprintf(f, "    shader_engine_tile_size = %u\n",
              16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
      fprintf(f, "    num_shader_engines = %u\n",
              1 << G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config));
      fprintf(f, "    num_gpus = %u (raw)\n", G_0098F8_NUM_GPUS_GFX9(info->gb_addr_config));
      fprintf(f, "    multi_gpu_tile_size = %u (raw)\n",
              G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
      fprintf(f, "    num_rb_per_se = %u\n", 1 << G_0098F8_NUM_RB_PER_SE(info->gb_addr_config));
      fprintf(f, "    row_size = %u\n", 1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
      fprintf(f, "    num_lower_pipes = %u (raw)\n", G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
      fprintf(f, "    se_enable = %u (raw)\n", G_0098F8_SE_ENABLE(info->gb_addr_config));
   } else {
      fprintf(f, "    num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
      fprintf(f, "    pipe_interleave_size = %u\n",
              256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(info->gb_addr_config));
      fprintf(f, "    bank_interleave_size = %u\n",
              1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
      fprintf(f, "    num_shader_engines = %u\n",
              1 << G_0098F8_NUM_SHADER_ENGINES_GFX6(info->gb_addr_config));
      fprintf(f, "    shader_engine_tile_size = %u\n",
              16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
      fprintf(f, "    num_gpus = %u (raw)\n", G_0098F8_NUM_GPUS_GFX6(info->gb_addr_config));
      fprintf(f, "    multi_gpu_tile_size = %u (raw)\n",
              G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
      fprintf(f, "    row_size = %u\n", 1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
      fprintf(f, "    num_lower_pipes = %u (raw)\n", G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
   }
}

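/* Per-family GS table depth for the legacy (pre-GFX9) geometry pipeline;
 * GFX9 and newer do not use it, so -1 is returned there.
 *
 * Illustrative call (a sketch added to this listing, not in the original file):
 *
 *    int depth = ac_get_gs_table_depth(info->chip_class, info->family);
 *    if (depth >= 0) {
 *       // pre-GFX9 only; how "depth" is consumed is driver-specific
 *    }
 */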
int ac_get_gs_table_depth(enum chip_class chip_class, enum radeon_family family)
{
   if (chip_class >= GFX9)
      return -1;

   switch (family) {
   case CHIP_OLAND:
   case CHIP_HAINAN:
   case CHIP_KAVERI:
   case CHIP_KABINI:
   case CHIP_ICELAND:
   case CHIP_CARRIZO:
   case CHIP_STONEY:
      return 16;
   case CHIP_TAHITI:
   case CHIP_PITCAIRN:
   case CHIP_VERDE:
   case CHIP_BONAIRE:
   case CHIP_HAWAII:
   case CHIP_TONGA:
   case CHIP_FIJI:
   case CHIP_POLARIS10:
   case CHIP_POLARIS11:
   case CHIP_POLARIS12:
   case CHIP_VEGAM:
      return 32;
   default:
      unreachable("Unknown GPU");
   }
}

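/* Compute the per-family PA_SC_RASTER_CONFIG / PA_SC_RASTER_CONFIG_1 values
 * (the 0x028350 / 0x028354 fields used below) plus an estimated SE tile
 * repeat for the GFX6-GFX8 families handled by the switch below.
 *
 * Illustrative call (a sketch added to this listing, not in the original file;
 * the last pointer may be NULL if the caller does not need se_tile_repeat):
 *
 *    uint32_t raster_config, raster_config_1;
 *    ac_get_raster_config(info, &raster_config, &raster_config_1, NULL);
 */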
void ac_get_raster_config(struct radeon_info *info, uint32_t *raster_config_p,
                          uint32_t *raster_config_1_p, uint32_t *se_tile_repeat_p)
{
   unsigned raster_config, raster_config_1, se_tile_repeat;

   switch (info->family) {
   /* 1 SE / 1 RB */
   case CHIP_HAINAN:
   case CHIP_KABINI:
   case CHIP_STONEY:
      raster_config = 0x00000000;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 4 RBs */
   case CHIP_VERDE:
      raster_config = 0x0000124a;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 2 RBs (Oland is special) */
   case CHIP_OLAND:
      raster_config = 0x00000082;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 2 RBs */
   case CHIP_KAVERI:
   case CHIP_ICELAND:
   case CHIP_CARRIZO:
      raster_config = 0x00000002;
      raster_config_1 = 0x00000000;
      break;
   /* 2 SEs / 4 RBs */
   case CHIP_BONAIRE:
   case CHIP_POLARIS11:
   case CHIP_POLARIS12:
      raster_config = 0x16000012;
      raster_config_1 = 0x00000000;
      break;
   /* 2 SEs / 8 RBs */
   case CHIP_TAHITI:
   case CHIP_PITCAIRN:
      raster_config = 0x2a00126a;
      raster_config_1 = 0x00000000;
      break;
   /* 4 SEs / 8 RBs */
   case CHIP_TONGA:
   case CHIP_POLARIS10:
      raster_config = 0x16000012;
      raster_config_1 = 0x0000002a;
      break;
   /* 4 SEs / 16 RBs */
   case CHIP_HAWAII:
   case CHIP_FIJI:
   case CHIP_VEGAM:
      raster_config = 0x3a00161a;
      raster_config_1 = 0x0000002e;
      break;
   default:
      fprintf(stderr, "ac: Unknown GPU, using 0 for raster_config\n");
      raster_config = 0x00000000;
      raster_config_1 = 0x00000000;
      break;
   }

   /* drm/radeon on Kaveri is buggy, so disable 1 RB to work around it.
    * This decreases performance by up to 50% when the RB is the bottleneck.
    */
   if (info->family == CHIP_KAVERI && !info->is_amdgpu)
      raster_config = 0x00000000;

   /* Fiji: Old kernels have incorrect tiling config. This decreases
    * RB performance by 25%. (it disables 1 RB in the second packer)
    */
   if (info->family == CHIP_FIJI && info->cik_macrotile_mode_array[0] == 0x000000e8) {
      raster_config = 0x16000012;
      raster_config_1 = 0x0000002a;
   }

   unsigned se_width = 8 << G_028350_SE_XSEL_GFX6(raster_config);
   unsigned se_height = 8 << G_028350_SE_YSEL_GFX6(raster_config);

   /* I don't know how to calculate this, though this is probably a good guess. */
   se_tile_repeat = MAX2(se_width, se_height) * info->max_se;

   *raster_config_p = raster_config;
   *raster_config_1_p = raster_config_1;
   if (se_tile_repeat_p)
      *se_tile_repeat_p = se_tile_repeat;
}

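/* Derive per-shader-engine raster configs for harvested chips: starting from
 * the base raster_config, the SE/PKR/RB map fields are rewritten for every SE
 * whose render backends are partially disabled, and SE_PAIR_MAP in
 * raster_config_1 is adjusted on GFX7+.
 *
 * Illustrative call (a sketch added to this listing, not in the original file;
 * raster_config_se needs one element per shader engine, and raster_config /
 * raster_config_1 are the values obtained from ac_get_raster_config):
 *
 *    unsigned raster_config_se[4];
 *    ac_get_harvested_configs(info, raster_config, &raster_config_1, raster_config_se);
 *    // drivers typically program raster_config_se[i] while GRBM_GFX_INDEX
 *    // selects shader engine i.
 */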
void ac_get_harvested_configs(struct radeon_info *info, unsigned raster_config,
                              unsigned *cik_raster_config_1_p, unsigned *raster_config_se)
{
   unsigned sh_per_se = MAX2(info->max_sa_per_se, 1);
   unsigned num_se = MAX2(info->max_se, 1);
   unsigned rb_mask = info->enabled_rb_mask;
   unsigned num_rb = MIN2(info->max_render_backends, 16);
   unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
   unsigned rb_per_se = num_rb / num_se;
   unsigned se_mask[4];
   unsigned se;

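   /* Slice the global enabled-RB mask into one sub-mask per shader engine:
    * each SE owns rb_per_se consecutive bits of rb_mask.
    */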
   se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
   se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
   se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
   se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

   assert(num_se == 1 || num_se == 2 || num_se == 4);
   assert(sh_per_se == 1 || sh_per_se == 2);
   assert(rb_per_pkr == 1 || rb_per_pkr == 2);

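   /* GFX7+: if there are more than two SEs and one whole SE pair has no
    * enabled RBs at all, rewrite SE_PAIR_MAP so that pair is no longer
    * targeted.
    */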
   if (info->chip_class >= GFX7) {
      unsigned raster_config_1 = *cik_raster_config_1_p;
      if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) || (!se_mask[2] && !se_mask[3]))) {
         raster_config_1 &= C_028354_SE_PAIR_MAP;

         if (!se_mask[0] && !se_mask[1]) {
            raster_config_1 |= S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
         } else {
            raster_config_1 |= S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
         }
         *cik_raster_config_1_p = raster_config_1;
      }
   }

   for (se = 0; se < num_se; se++) {
      unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
      unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
      int idx = (se / 2) * 2;

      raster_config_se[se] = raster_config;
      if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
         raster_config_se[se] &= C_028350_SE_MAP;

         if (!se_mask[idx]) {
            raster_config_se[se] |= S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
         } else {
            raster_config_se[se] |= S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
         }
      }

      pkr0_mask &= rb_mask;
      pkr1_mask &= rb_mask;
      if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
         raster_config_se[se] &= C_028350_PKR_MAP;

         if (!pkr0_mask) {
            raster_config_se[se] |= S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
         } else {
            raster_config_se[se] |= S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
         }
      }

      if (rb_per_se >= 2) {
         unsigned rb0_mask = 1 << (se * rb_per_se);
         unsigned rb1_mask = rb0_mask << 1;

         rb0_mask &= rb_mask;
         rb1_mask &= rb_mask;
         if (!rb0_mask || !rb1_mask) {
            raster_config_se[se] &= C_028350_RB_MAP_PKR0;

            if (!rb0_mask) {
               raster_config_se[se] |= S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
            } else {
               raster_config_se[se] |= S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
            }
         }

         if (rb_per_se > 2) {
            rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
            rb1_mask = rb0_mask << 1;
            rb0_mask &= rb_mask;
            rb1_mask &= rb_mask;
            if (!rb0_mask || !rb1_mask) {
               raster_config_se[se] &= C_028350_RB_MAP_PKR1;

               if (!rb0_mask) {
                  raster_config_se[se] |= S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
               } else {
                  raster_config_se[se] |= S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
               }
            }
         }
      }
   }
}

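/* Build the value programmed into the compute resource limits register
 * (the 0x00B854 fields used below) for a compute dispatch.
 *
 * Illustrative call (a sketch added to this listing, not in the original file;
 * 0 requests no per-SH wave limit and 1 means one threadgroup per CU):
 *
 *    unsigned limits = ac_get_compute_resource_limits(info, waves_per_threadgroup, 0, 1);
 */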
unsigned ac_get_compute_resource_limits(struct radeon_info *info, unsigned waves_per_threadgroup,
                                        unsigned max_waves_per_sh, unsigned threadgroups_per_cu)
{
   unsigned compute_resource_limits = S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);

   if (info->chip_class >= GFX7) {
      unsigned num_cu_per_se = info->num_good_compute_units / info->num_se;

      /* Force even distribution on all SIMDs in CU if the workgroup
       * size is 64. This has shown some good improvements if # of CUs
       * per SE is not a multiple of 4.
       */
      if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
         compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);

      assert(threadgroups_per_cu >= 1 && threadgroups_per_cu <= 8);
      compute_resource_limits |=
         S_00B854_WAVES_PER_SH(max_waves_per_sh) | S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1);
   } else {
      /* GFX6 */
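      /* The GFX6 WAVES_PER_SH field appears to be expressed in units of 16
       * waves, hence the round-up division by 16 below.
       */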
      if (max_waves_per_sh) {
         unsigned limit_div16 = DIV_ROUND_UP(max_waves_per_sh, 16);
         compute_resource_limits |= S_00B854_WAVES_PER_SH_GFX6(limit_div16);
      }
   }
   return compute_resource_limits;
}