/* basic set of prime tests between intel and nouveau */

/* test list -
   1. share buffer from intel -> nouveau.
   2. share buffer from nouveau -> intel
   3. share intel->nouveau, map on both, write intel, read nouveau
   4. share intel->nouveau, blit intel fill, readback on nouveau
   test 1 + map buffer, read/write, map the other side.
   do some hw actions on the buffer
   some illegal operations -
       close prime fd, then try to map/import (see the sketch before igt_main)

   TODO add some nouveau rendering tests
*/


#include "igt.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/ioctl.h>

#include "intel_bufmgr.h"
#include "nouveau.h"

int intel_fd = -1, nouveau_fd = -1;
drm_intel_bufmgr *bufmgr;
struct nouveau_device *ndev;
struct nouveau_client *nclient;
uint32_t devid;
struct intel_batchbuffer *intel_batch;

#define BO_SIZE (256*1024)

static int find_and_open_devices(void)
{
	int i;
	char path[80];
	struct stat buf;
	FILE *fl;
	char vendor_id[8];
	int venid;
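
	/* scan the first DRM card nodes and open the intel (vendor 0x8086)
	 * and NVIDIA/nouveau (vendor 0x10de) devices by PCI vendor id */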
	for (i = 0; i < 9; i++) {
		char *ret;

		sprintf(path, "/sys/class/drm/card%d/device/vendor", i);
		if (stat(path, &buf))
			break;

		fl = fopen(path, "r");
		if (!fl)
			break;

		ret = fgets(vendor_id, 8, fl);
		igt_assert(ret);
		fclose(fl);

		venid = strtoul(vendor_id, NULL, 16);
		sprintf(path, "/dev/dri/card%d", i);
		if (venid == 0x8086) {
			intel_fd = open(path, O_RDWR);
			if (intel_fd < 0)
				return -1;
		} else if (venid == 0x10de) {
			nouveau_fd = open(path, O_RDWR);
			if (nouveau_fd < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * prime test 1 -
 * allocate buffer on intel,
 * set prime on buffer,
 * retrieve buffer on nouveau,
 * close prime_fd,
 * unref buffers
 */
static void test_i915_nv_sharing(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);
	igt_assert(test_intel_bo);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
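	/* once imported, the dma-buf fd is no longer needed; closing it must
	 * not tear down the nouveau reference */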
	close(prime_fd);

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/*
 * prime test 2 -
 * allocate buffer on nouveau
 * set prime on buffer,
 * retrieve buffer on intel
 * close prime_fd,
 * unref buffers
 */
static void test_nv_i915_sharing(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;

	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
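	/* export the GART-backed nouveau BO as a dma-buf fd */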
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	igt_assert(test_intel_bo);

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/*
 * allocate intel, give to nouveau, map on nouveau
 * write 0xdeadbeef, non-gtt map on intel, read
 */
static void test_nv_write_i915_cpu_mmap_read(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
	close(prime_fd);

	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

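	/* read the value back through a non-GTT (CPU) mapping on the intel side */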
	drm_intel_bo_map(test_intel_bo, 1);
	ptr = test_intel_bo->virtual;
	igt_assert(ptr);

	igt_assert(*ptr == 0xdeadbeef);
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/*
 * allocate intel, give to nouveau, map on nouveau
 * write 0xdeadbeef, gtt map on intel, read
 */
static void test_nv_write_i915_gtt_mmap_read(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
	close(prime_fd);
	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

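	/* this time read the value back through a GTT mapping on the intel side */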
	drm_intel_gem_bo_map_gtt(test_intel_bo);
	ptr = test_intel_bo->virtual;
	igt_assert(ptr);

	igt_assert(*ptr == 0xdeadbeef);

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* CPU mmap (drm_intel_bo_map) of an imported dma-buf doesn't work properly:
   it tries to map the backing shmem fd, which doesn't exist
   for these objects */
static void test_i915_import_cpu_mmap(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;

	igt_skip("cpu mmap support for imported dma-bufs not yet implemented\n");
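	/* igt_skip() bails out of the subtest here; the code below documents
	 * the intended behaviour once CPU mmap of imported dma-bufs works */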

	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);
	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	igt_assert(test_intel_bo);

	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

	igt_assert(drm_intel_bo_map(test_intel_bo, 0) == 0);
	igt_assert(test_intel_bo->virtual);
	ptr = test_intel_bo->virtual;

	igt_assert(*ptr == 0xdeadbeef);
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test that drm_intel_gem_bo_map_gtt works properly on an imported dma-buf:
   unlike the CPU mmap above, the GTT mapping does not depend on a backing
   shmem fd */
static void test_i915_import_gtt_mmap(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;

	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	igt_assert(test_intel_bo);

	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;
	*(ptr + 1) = 0xa55a55;

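	/* the GTT mmap of the imported dma-buf must succeed and show the
	 * value just written through the nouveau mapping */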
	igt_assert(drm_intel_gem_bo_map_gtt(test_intel_bo) == 0);
	igt_assert(test_intel_bo->virtual);
	ptr = test_intel_bo->virtual;

	igt_assert(*ptr == 0xdeadbeef);
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test 7 - import from nouveau into intel, check pread/pwrite work on the
   imported buffer */
static void test_i915_import_pread_pwrite(void)
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;
	uint32_t buf[64];

	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	igt_assert(test_intel_bo);

	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

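	/* pread should see the value written through the nouveau mapping, and
	 * a subsequent pwrite should be visible back through that mapping */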
	gem_read(intel_fd, test_intel_bo->handle, 0, buf, 256);
	igt_assert(buf[0] == 0xdeadbeef);
	buf[0] = 0xabcdef55;

	gem_write(intel_fd, test_intel_bo->handle, 0, buf, 4);

	igt_assert(*ptr == 0xabcdef55);

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

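/* fill width*height dwords of an already GTT-mapped BO with val */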
static void
set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	int size = width * height;
	uint32_t *vaddr;

	drm_intel_gem_bo_start_gtt_access(bo, true);
	vaddr = bo->virtual;
	while (size--)
		*vaddr++ = val;
}

static drm_intel_bo *
create_bo(drm_intel_bufmgr *ibufmgr, uint32_t val, int width, int height)
{
	drm_intel_bo *bo;

	bo = drm_intel_bo_alloc(ibufmgr, "bo", 4*width*height, 0);
	igt_assert(bo);

	/* gtt map doesn't have a write parameter, so just keep the mapping
	 * around (to avoid the set_domain with the gtt write domain set) and
	 * manually tell the kernel when we start accessing the gtt. */
	drm_intel_gem_bo_map_gtt(bo);

	set_bo(bo, val, width, height);

	return bo;
}

/* use intel hw to fill the BO with a blit from another BO,
   then read back from the nouveau bo and check the value is correct */
static void test_i915_blt_fill_nv_read(void)
{
	drm_intel_bo *test_intel_bo, *src_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	src_bo = create_bo(bufmgr, 0xaa55aa55, 256, 1);

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
	close(prime_fd);

	intel_copy_bo(intel_batch, test_intel_bo, src_bo, BO_SIZE);

	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);

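	/* mapping the intel BO synchronizes with the blit before we read back
	 * through the nouveau mapping */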
	drm_intel_bo_map(test_intel_bo, 0);

	ptr = nvbo->map;
	igt_assert(*ptr == 0xaa55aa55);
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test 8 use nouveau to do blit */

/* test 9 nouveau copy engine?? */
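
/*
 * A minimal sketch of the "close prime fd, then try to map/import" item from
 * the test list at the top of the file: export an intel BO, close the dma-buf
 * fd, then check that importing the stale fd fails.  It only uses helpers
 * already present in this file; the function name is illustrative and it is
 * deliberately not wired into the xtest list in igt_main below.
 */
static void __attribute__((unused))
test_i915_close_prime_fd_then_import(void)
{
	drm_intel_bo *test_intel_bo, *import_bo;
	int prime_fd;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);
	igt_assert(test_intel_bo);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);
	close(prime_fd);

	/* importing a closed fd should fail, so no intel BO is returned */
	import_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	igt_assert(import_bo == NULL);

	drm_intel_bo_unreference(test_intel_bo);
}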

igt_main
{
	igt_fixture {
		igt_assert(find_and_open_devices() == 0);

		igt_require(nouveau_fd != -1);
		igt_require(intel_fd != -1);

		/* set up intel bufmgr */
		bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
		igt_assert(bufmgr);
		/* Do not enable reuse, we share (almost) all buffers. */
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

		/* set up nouveau bufmgr */
		igt_assert(nouveau_device_wrap(nouveau_fd, 0, &ndev) == 0);
		igt_assert(nouveau_client_new(ndev, &nclient) == 0);

		/* set up an intel batch buffer */
		devid = intel_get_drm_devid(intel_fd);
		intel_batch = intel_batchbuffer_alloc(bufmgr, devid);
	}

#define xtest(name) \
	igt_subtest(#name) \
		test_##name();

	xtest(i915_nv_sharing);
	xtest(nv_i915_sharing);
	xtest(nv_write_i915_cpu_mmap_read);
	xtest(nv_write_i915_gtt_mmap_read);
	xtest(i915_import_cpu_mmap);
	xtest(i915_import_gtt_mmap);
	xtest(i915_import_pread_pwrite);
	xtest(i915_blt_fill_nv_read);

	igt_fixture {
		intel_batchbuffer_free(intel_batch);

		nouveau_device_del(&ndev);
		drm_intel_bufmgr_destroy(bufmgr);

		close(intel_fd);
		close(nouveau_fd);
	}
}