/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <errno.h>
#include <string.h>

#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "xf86drm.h"

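/*
 * Generic wrapper around the DRM_AMDGPU_INFO ioctl: the kernel copies up to
 * "size" bytes of the result for query "info_id" into the buffer at "value".
 */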
drm_public int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
				 unsigned size, void *value)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)value;
	request.return_size = size;
	request.query = info_id;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

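/* Translate a KMS CRTC object id into the hardware CRTC index. */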
drm_public int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
					 int32_t *result)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)result;
	request.return_size = sizeof(*result);
	request.query = AMDGPU_INFO_CRTC_FROM_ID;
	request.mode_crtc.id = id;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

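/*
 * Read "count" consecutive dwords of register space starting at
 * "dword_offset" into "values". "instance" selects the shader engine /
 * shader array to read from, packed with the AMDGPU_INFO_MMR_*_SHIFT
 * and _MASK macros (see amdgpu_query_gpu_info_init() below).
 */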
drm_public int amdgpu_read_mm_registers(amdgpu_device_handle dev,
					unsigned dword_offset, unsigned count,
					uint32_t instance, uint32_t flags,
					uint32_t *values)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)values;
	request.return_size = count * sizeof(uint32_t);
	request.query = AMDGPU_INFO_READ_MMR_REG;
	request.read_mmr_reg.dword_offset = dword_offset;
	request.read_mmr_reg.count = count;
	request.read_mmr_reg.instance = instance;
	request.read_mmr_reg.flags = flags;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

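/*
 * Query how many instances of the given hardware IP block exist, e.g.:
 *
 *	uint32_t count;
 *	amdgpu_query_hw_ip_count(dev, AMDGPU_HW_IP_GFX, &count);
 */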
drm_public int amdgpu_query_hw_ip_count(amdgpu_device_handle dev,
					unsigned type,
					uint32_t *count)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)count;
	request.return_size = sizeof(*count);
	request.query = AMDGPU_INFO_HW_IP_COUNT;
	request.query_hw_ip.type = type;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

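/*
 * Query the capabilities of one IP instance (available rings, IB alignment
 * requirements, version) into the caller-provided drm_amdgpu_info_hw_ip.
 */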
drm_public int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
				       unsigned ip_instance,
				       struct drm_amdgpu_info_hw_ip *info)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)info;
	request.return_size = sizeof(*info);
	request.query = AMDGPU_INFO_HW_IP_INFO;
	request.query_hw_ip.type = type;
	request.query_hw_ip.ip_instance = ip_instance;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

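/*
 * Query the loaded firmware version and feature flags for one firmware type
 * (AMDGPU_INFO_FW_*), for example the GFX microengine firmware:
 *
 *	uint32_t ver, feature;
 *	amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
 *				      &ver, &feature);
 */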
drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
					     unsigned fw_type, unsigned ip_instance,
					     unsigned index, uint32_t *version,
					     uint32_t *feature)
{
	struct drm_amdgpu_info request;
	struct drm_amdgpu_info_firmware firmware = {};
	int r;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&firmware;
	request.return_size = sizeof(firmware);
	request.query = AMDGPU_INFO_FW_VERSION;
	request.query_fw.fw_type = fw_type;
	request.query_fw.ip_instance = ip_instance;
	request.query_fw.index = index;

	r = drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			    sizeof(struct drm_amdgpu_info));
	if (r)
		return r;

	*version = firmware.ver;
	*feature = firmware.feature;
	return 0;
}

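/*
 * Fill dev->info once at device initialization time: copy the static fields
 * reported by AMDGPU_INFO_DEV_INFO and, on families older than
 * AMDGPU_FAMILY_AI, read the per-shader-engine harvesting and tiling
 * configuration registers directly.
 */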
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
{
	int r, i;

	r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev->dev_info),
			      &dev->dev_info);
	if (r)
		return r;

	dev->info.asic_id = dev->dev_info.device_id;
	dev->info.chip_rev = dev->dev_info.chip_rev;
	dev->info.chip_external_rev = dev->dev_info.external_rev;
	dev->info.family_id = dev->dev_info.family;
	dev->info.max_engine_clk = dev->dev_info.max_engine_clock;
	dev->info.max_memory_clk = dev->dev_info.max_memory_clock;
	dev->info.gpu_counter_freq = dev->dev_info.gpu_counter_freq;
	dev->info.enabled_rb_pipes_mask = dev->dev_info.enabled_rb_pipes_mask;
	dev->info.rb_pipes = dev->dev_info.num_rb_pipes;
	dev->info.ids_flags = dev->dev_info.ids_flags;
	dev->info.num_hw_gfx_contexts = dev->dev_info.num_hw_gfx_contexts;
	dev->info.num_shader_engines = dev->dev_info.num_shader_engines;
	dev->info.num_shader_arrays_per_engine =
		dev->dev_info.num_shader_arrays_per_engine;
	dev->info.vram_type = dev->dev_info.vram_type;
	dev->info.vram_bit_width = dev->dev_info.vram_bit_width;
	dev->info.ce_ram_size = dev->dev_info.ce_ram_size;
	dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
	dev->info.pci_rev_id = dev->dev_info.pci_rev;

	if (dev->info.family_id < AMDGPU_FAMILY_AI) {
		for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
			unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
				(AMDGPU_INFO_MMR_SH_INDEX_MASK <<
				 AMDGPU_INFO_MMR_SH_INDEX_SHIFT);

			r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
						     &dev->info.backend_disable[i]);
			if (r)
				return r;
			/* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
			dev->info.backend_disable[i] =
				(dev->info.backend_disable[i] >> 16) & 0xff;

			r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
						     &dev->info.pa_sc_raster_cfg[i]);
			if (r)
				return r;

			if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
				r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
							     &dev->info.pa_sc_raster_cfg1[i]);
				if (r)
					return r;
			}
		}
	}

	r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
				     &dev->info.gb_addr_cfg);
	if (r)
		return r;

	if (dev->info.family_id < AMDGPU_FAMILY_AI) {
		r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
					     dev->info.gb_tile_mode);
		if (r)
			return r;

		if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
			r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
						     dev->info.gb_macro_tile_mode);
			if (r)
				return r;
		}

		r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
					     &dev->info.mc_arb_ramcfg);
		if (r)
			return r;
	}

	dev->info.cu_active_number = dev->dev_info.cu_active_number;
	dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
	memcpy(&dev->info.cu_bitmap[0][0], &dev->dev_info.cu_bitmap[0][0],
	       sizeof(dev->info.cu_bitmap));

	/* TODO: info->max_quad_shader_pipes is not set */
	/* TODO: info->avail_quad_shader_pipes is not set */
	/* TODO: info->cache_entries_per_quad_pipe is not set */
	return 0;
}

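/*
 * Return the cached ASIC information gathered at initialization time.
 * A minimal usage sketch ("fd" is assumed to be an already opened
 * amdgpu DRM file descriptor):
 *
 *	amdgpu_device_handle dev;
 *	uint32_t major, minor;
 *	struct amdgpu_gpu_info info;
 *
 *	if (!amdgpu_device_initialize(fd, &major, &minor, &dev) &&
 *	    !amdgpu_query_gpu_info(dev, &info))
 *		printf("family 0x%x, max engine clock %llu\n",
 *		       info.family_id, (unsigned long long)info.max_engine_clk);
 */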
drm_public int amdgpu_query_gpu_info(amdgpu_device_handle dev,
				     struct amdgpu_gpu_info *info)
{
	if (!dev || !info)
		return -EINVAL;

	/* Get ASIC info */
	*info = dev->info;

	return 0;
}

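/*
 * Report size, usage and maximum single-allocation size for one memory heap.
 * "heap" is AMDGPU_GEM_DOMAIN_VRAM or AMDGPU_GEM_DOMAIN_GTT; passing
 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED in "flags" restricts the VRAM query
 * to the CPU-visible portion.
 */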
drm_public int amdgpu_query_heap_info(amdgpu_device_handle dev,
				      uint32_t heap,
				      uint32_t flags,
				      struct amdgpu_heap_info *info)
{
	struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
	int r;

	r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
			      sizeof(vram_gtt_info), &vram_gtt_info);
	if (r)
		return r;

	/* Get heap information */
	switch (heap) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		/* query visible only vram heap */
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			info->heap_size = vram_gtt_info.vram_cpu_accessible_size;
		else /* query total vram heap */
			info->heap_size = vram_gtt_info.vram_size;

		info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			r = amdgpu_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE,
					      sizeof(info->heap_usage),
					      &info->heap_usage);
		else
			r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_USAGE,
					      sizeof(info->heap_usage),
					      &info->heap_usage);
		if (r)
			return r;
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		info->heap_size = vram_gtt_info.gtt_size;
		info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

		r = amdgpu_query_info(dev, AMDGPU_INFO_GTT_USAGE,
				      sizeof(info->heap_usage),
				      &info->heap_usage);
		if (r)
			return r;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

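/* Copy the GDS/GWS/OA partition sizes reported by the kernel into gds_info. */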
drm_public int amdgpu_query_gds_info(amdgpu_device_handle dev,
				     struct amdgpu_gds_resource_info *gds_info)
{
	struct drm_amdgpu_info_gds gds_config = {};
	int r;

	if (!gds_info)
		return -EINVAL;

	r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
			      sizeof(gds_config), &gds_config);
	if (r)
		return r;

	gds_info->gds_gfx_partition_size = gds_config.gds_gfx_partition_size;
	gds_info->compute_partition_size = gds_config.compute_partition_size;
	gds_info->gds_total_size = gds_config.gds_total_size;
	gds_info->gws_per_gfx_partition = gds_config.gws_per_gfx_partition;
	gds_info->gws_per_compute_partition = gds_config.gws_per_compute_partition;
	gds_info->oa_per_gfx_partition = gds_config.oa_per_gfx_partition;
	gds_info->oa_per_compute_partition = gds_config.oa_per_compute_partition;

	return 0;
}

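/*
 * Read one sensor value (an AMDGPU_INFO_SENSOR_* type) into the buffer at
 * "value", for example the current graphics clock:
 *
 *	uint32_t sclk;
 *	amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GFX_SCLK,
 *				 sizeof(sclk), &sclk);
 */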
drm_public int amdgpu_query_sensor_info(amdgpu_device_handle dev,
					unsigned sensor_type,
					unsigned size, void *value)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)value;
	request.return_size = size;
	request.query = AMDGPU_INFO_SENSOR;
	request.sensor_info.type = sensor_type;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}