/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#include "CUnit/Basic.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;

static void amdgpu_vmid_reserve_test(void);
static void amdgpu_vm_unaligned_map(void);
static void amdgpu_vm_mapping_test(void);

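/*
 * Check whether the VM tests can run on this device at all; the suite
 * currently hangs the CP on SI ASICs, so it is disabled there.
 */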
CU_BOOL suite_vm_tests_enable(void)
{
	CU_BOOL enable = CU_TRUE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle))
		return CU_FALSE;

	if (device_handle->info.family_id == AMDGPU_FAMILY_SI) {
		printf("\n\nCurrently hangs the CP on this ASIC, VM suite disabled\n");
		enable = CU_FALSE;
	}

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	return enable;
}

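/* Suite setup: open the first amdgpu device for all tests in this suite. */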
int suite_vm_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);

	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
			       "Hint: Try to run this test program as root.",
			       strerror(errno));
		return CUE_SINIT_FAILED;
	}

	return CUE_SUCCESS;
}

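/* Suite teardown: release the device opened in suite_vm_tests_init(). */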
int suite_vm_tests_clean(void)
{
	int r = amdgpu_device_deinitialize(device_handle);

	if (r == 0)
		return CUE_SUCCESS;
	else
		return CUE_SCLEAN_FAILED;
}

CU_TestInfo vm_tests[] = {
	{ "reserve vmid test", amdgpu_vmid_reserve_test },
	{ "unaligned map", amdgpu_vm_unaligned_map },
	{ "vm mapping test", amdgpu_vm_mapping_test },
	CU_TEST_INFO_NULL,
};

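/*
 * Reserve a dedicated VMID for the process, submit a NOP IB while the
 * reservation is held, wait for the resulting fence, and release the
 * VMID again.
 */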
static void amdgpu_vmid_reserve_test(void)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired, flags;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;
	static uint32_t *ptr;
	struct amdgpu_gpu_info gpu_info = {0};
	unsigned gc_ip_type;

	r = amdgpu_query_gpu_info(device_handle, &gpu_info);
	CU_ASSERT_EQUAL(r, 0);

	/* Arcturus has no GFX ring, so fall back to a compute ring there. */
	gc_ip_type = (asic_is_arcturus(gpu_info.asic_id)) ?
			AMDGPU_HW_IP_COMPUTE : AMDGPU_HW_IP_GFX;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	flags = 0;
	r = amdgpu_vm_reserve_vmid(device_handle, flags);
	CU_ASSERT_EQUAL(r, 0);

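	/*
	 * Build a small IB of NOP packets and submit it, so that the
	 * command submission path is exercised while the VMID reservation
	 * is held.
	 */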
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;

	for (i = 0; i < 16; ++i)
		ptr[i] = 0xffff1000; /* GFX/compute NOP packet */

	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = gc_ip_type;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);

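	/* Wait for the submission to complete before tearing down. */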
	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = gc_ip_type;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	flags = 0;
	r = amdgpu_vm_unreserve_vmid(device_handle, flags);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

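/*
 * Make a mapping that is as poorly aligned as possible: allocate a 4GB
 * VRAM buffer and map almost all of it at a VA that is offset by a
 * single page from a 1GB-aligned range, then unmap it again.
 */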
static void amdgpu_vm_unaligned_map(void)
{
	const uint64_t map_size = (4ULL << 30) - (2 << 12);
	struct amdgpu_bo_alloc_request request = {};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle handle;
	uint64_t vmc_addr;
	int r;

	request.alloc_size = 4ULL << 30;
	request.phys_alignment = 4096;
	request.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
	request.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;

	r = amdgpu_bo_alloc(device_handle, &request, &buf_handle);
	/* Don't let the test fail if the device doesn't have enough VRAM */
	if (r)
		return;

	r = amdgpu_va_range_alloc(device_handle, amdgpu_gpu_va_range_general,
				  4ULL << 30, 1ULL << 30, 0, &vmc_addr,
				  &handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	if (r)
		goto error_va_alloc;

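	/*
	 * Shifting the VA by one page keeps the mapping page aligned but
	 * breaks any larger alignment, which should force the kernel to
	 * fall back to the smallest page fragment size.
	 */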
	vmc_addr += 1 << 12;

	r = amdgpu_bo_va_op(buf_handle, 0, map_size, vmc_addr, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	if (r)
		goto error_va_alloc;

	amdgpu_bo_va_op(buf_handle, 0, map_size, vmc_addr, 0,
			AMDGPU_VA_OP_UNMAP);

error_va_alloc:
	amdgpu_bo_free(buf_handle);
}

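/*
 * Map a page-sized GTT buffer at the extreme ends of the GPU virtual
 * address space reported by the kernel: the first and last page of the
 * low VA range and, when the ASIC has one, of the high VA range.
 */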
static void amdgpu_vm_mapping_test(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	struct drm_amdgpu_info_device dev_info;
	const uint64_t size = 4096;
	amdgpu_bo_handle buf;
	uint64_t addr;
	int r;

	req.alloc_size = size;
	req.phys_alignment = 0;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
	req.flags = 0;

	r = amdgpu_bo_alloc(device_handle, &req, &buf);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_query_info(device_handle, AMDGPU_INFO_DEV_INFO,
			      sizeof(dev_info), &dev_info);
	CU_ASSERT_EQUAL(r, 0);

	/* Map at the lowest and highest addresses of the low VA range. */
	addr = dev_info.virtual_address_offset;
	r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

	addr = dev_info.virtual_address_max - size;
	r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

	/* Do the same for the high VA range, if the ASIC has one. */
	if (dev_info.high_va_offset) {
		addr = dev_info.high_va_offset;
		r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
		CU_ASSERT_EQUAL(r, 0);

		addr = dev_info.high_va_max - size;
		r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
		CU_ASSERT_EQUAL(r, 0);
	}

	amdgpu_bo_free(buf);
}