/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <errno.h>
#include <string.h>

#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "xf86drm.h"

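/* Generic wrapper for the DRM_AMDGPU_INFO ioctl: the kernel writes up to
 * "size" bytes of the query selected by "info_id" into "value". */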
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
		      unsigned size, void *value)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)value;
	request.return_size = size;
	request.query = info_id;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

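/* Map a KMS CRTC object id to the driver's CRTC identifier via
 * AMDGPU_INFO_CRTC_FROM_ID. */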
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
			      int32_t *result)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)result;
	request.return_size = sizeof(*result);
	request.query = AMDGPU_INFO_CRTC_FROM_ID;
	request.mode_crtc.id = id;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

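/* Read "count" consecutive dwords of register space starting at
 * "dword_offset".  "instance" selects a register bank (e.g. a shader
 * engine/array encoded with the AMDGPU_INFO_MMR_* shifts and masks). */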
int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
			     unsigned count, uint32_t instance, uint32_t flags,
			     uint32_t *values)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)values;
	request.return_size = count * sizeof(uint32_t);
	request.query = AMDGPU_INFO_READ_MMR_REG;
	request.read_mmr_reg.dword_offset = dword_offset;
	request.read_mmr_reg.count = count;
	request.read_mmr_reg.instance = instance;
	request.read_mmr_reg.flags = flags;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

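/* Query how many instances of the given hardware IP type the device
 * exposes. */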
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
			     uint32_t *count)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)count;
	request.return_size = sizeof(*count);
	request.query = AMDGPU_INFO_HW_IP_COUNT;
	request.query_hw_ip.type = type;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

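/* Query details (IP version, capabilities, IB alignment, available rings)
 * for one instance of a hardware IP block. */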
int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
			    unsigned ip_instance,
			    struct drm_amdgpu_info_hw_ip *info)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)info;
	request.return_size = sizeof(*info);
	request.query = AMDGPU_INFO_HW_IP_INFO;
	request.query_hw_ip.type = type;
	request.query_hw_ip.ip_instance = ip_instance;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}

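/* Query the version and feature bits of one loaded firmware image,
 * selected by fw_type, ip_instance and index. */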
int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
				  unsigned ip_instance, unsigned index,
				  uint32_t *version, uint32_t *feature)
{
	struct drm_amdgpu_info request;
	struct drm_amdgpu_info_firmware firmware = {};
	int r;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&firmware;
	request.return_size = sizeof(firmware);
	request.query = AMDGPU_INFO_FW_VERSION;
	request.query_fw.fw_type = fw_type;
	request.query_fw.ip_instance = ip_instance;
	request.query_fw.index = index;

	r = drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			    sizeof(struct drm_amdgpu_info));
	if (r)
		return r;

	*version = firmware.ver;
	*feature = firmware.feature;
	return 0;
}

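/* Fill the cached dev->info at device initialization time from the kernel's
 * device info plus a handful of configuration registers, so that
 * amdgpu_query_gpu_info() can answer without further ioctls. */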
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
{
	int r, i;

	r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev->dev_info),
			      &dev->dev_info);
	if (r)
		return r;

	dev->info.asic_id = dev->dev_info.device_id;
	dev->info.chip_rev = dev->dev_info.chip_rev;
	dev->info.chip_external_rev = dev->dev_info.external_rev;
	dev->info.family_id = dev->dev_info.family;
	dev->info.max_engine_clk = dev->dev_info.max_engine_clock;
	dev->info.max_memory_clk = dev->dev_info.max_memory_clock;
	dev->info.gpu_counter_freq = dev->dev_info.gpu_counter_freq;
	dev->info.enabled_rb_pipes_mask = dev->dev_info.enabled_rb_pipes_mask;
	dev->info.rb_pipes = dev->dev_info.num_rb_pipes;
	dev->info.ids_flags = dev->dev_info.ids_flags;
	dev->info.num_hw_gfx_contexts = dev->dev_info.num_hw_gfx_contexts;
	dev->info.num_shader_engines = dev->dev_info.num_shader_engines;
	dev->info.num_shader_arrays_per_engine =
		dev->dev_info.num_shader_arrays_per_engine;
	dev->info.vram_type = dev->dev_info.vram_type;
	dev->info.vram_bit_width = dev->dev_info.vram_bit_width;
	dev->info.ce_ram_size = dev->dev_info.ce_ram_size;
	dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
	dev->info.pci_rev_id = dev->dev_info.pci_rev;

	if (dev->info.family_id < AMDGPU_FAMILY_AI) {
		for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
			unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
					    (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
					     AMDGPU_INFO_MMR_SH_INDEX_SHIFT);

			r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
						     &dev->info.backend_disable[i]);
			if (r)
				return r;
			/* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
			dev->info.backend_disable[i] =
				(dev->info.backend_disable[i] >> 16) & 0xff;

			r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
						     &dev->info.pa_sc_raster_cfg[i]);
			if (r)
				return r;

			if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
				r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
							     &dev->info.pa_sc_raster_cfg1[i]);
				if (r)
					return r;
			}
		}
	}

	r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
				     &dev->info.gb_addr_cfg);
	if (r)
		return r;

	if (dev->info.family_id < AMDGPU_FAMILY_AI) {
		r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
					     dev->info.gb_tile_mode);
		if (r)
			return r;

		if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
			r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
						     dev->info.gb_macro_tile_mode);
			if (r)
				return r;
		}

		r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
					     &dev->info.mc_arb_ramcfg);
		if (r)
			return r;
	}

	dev->info.cu_active_number = dev->dev_info.cu_active_number;
	dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
	memcpy(&dev->info.cu_bitmap[0][0], &dev->dev_info.cu_bitmap[0][0],
	       sizeof(dev->info.cu_bitmap));

	/* TODO: info->max_quad_shader_pipes is not set */
	/* TODO: info->avail_quad_shader_pipes is not set */
	/* TODO: info->cache_entries_per_quad_pipe is not set */
	return 0;
}

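/* Return the ASIC info cached by amdgpu_query_gpu_info_init(). */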
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
			  struct amdgpu_gpu_info *info)
{
	if (!dev || !info)
		return -EINVAL;

	/* Get ASIC info */
	*info = dev->info;

	return 0;
}

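/* Report size, current usage and maximum allocation size for the VRAM or
 * GTT heap.  For VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED in "flags"
 * restricts the query to the CPU-visible portion. */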
int amdgpu_query_heap_info(amdgpu_device_handle dev,
			   uint32_t heap,
			   uint32_t flags,
			   struct amdgpu_heap_info *info)
{
	struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
	int r;

	r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
			      sizeof(vram_gtt_info), &vram_gtt_info);
	if (r)
		return r;

	/* Get heap information */
	switch (heap) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		/* query visible only vram heap */
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			info->heap_size = vram_gtt_info.vram_cpu_accessible_size;
		else /* query total vram heap */
			info->heap_size = vram_gtt_info.vram_size;

		info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			r = amdgpu_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE,
					      sizeof(info->heap_usage),
					      &info->heap_usage);
		else
			r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_USAGE,
					      sizeof(info->heap_usage),
					      &info->heap_usage);
		if (r)
			return r;
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		info->heap_size = vram_gtt_info.gtt_size;
		info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

		r = amdgpu_query_info(dev, AMDGPU_INFO_GTT_USAGE,
				      sizeof(info->heap_usage),
				      &info->heap_usage);
		if (r)
			return r;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

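/* Query the Global Data Share configuration: GDS partition sizes plus the
 * GWS and OA resources available per graphics/compute partition. */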
int amdgpu_query_gds_info(amdgpu_device_handle dev,
			  struct amdgpu_gds_resource_info *gds_info)
{
	struct drm_amdgpu_info_gds gds_config = {};
	int r;

	if (!gds_info)
		return -EINVAL;

	r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
			      sizeof(gds_config), &gds_config);
	if (r)
		return r;

	gds_info->gds_gfx_partition_size = gds_config.gds_gfx_partition_size;
	gds_info->compute_partition_size = gds_config.compute_partition_size;
	gds_info->gds_total_size = gds_config.gds_total_size;
	gds_info->gws_per_gfx_partition = gds_config.gws_per_gfx_partition;
	gds_info->gws_per_compute_partition = gds_config.gws_per_compute_partition;
	gds_info->oa_per_gfx_partition = gds_config.oa_per_gfx_partition;
	gds_info->oa_per_compute_partition = gds_config.oa_per_compute_partition;

	return 0;
}

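/* Read a GPU sensor value (e.g. clocks, temperature, load or power)
 * selected by sensor_type into "value". */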
int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
			     unsigned size, void *value)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)value;
	request.return_size = size;
	request.query = AMDGPU_INFO_SENSOR;
	request.sensor_info.type = sensor_type;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			       sizeof(struct drm_amdgpu_info));
}