1 /**************************************************************************
2  *
3  * Copyright © 2007 Red Hat Inc.
4  * Copyright © 2007-2012 Intel Corporation
5  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22  * USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * The above copyright notice and this permission notice (including the
25  * next paragraph) shall be included in all copies or substantial portions
26  * of the Software.
27  *
28  *
29  **************************************************************************/
30 /*
31  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32  *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33  *	    Eric Anholt <eric@anholt.net>
34  *	    Dave Airlie <airlied@linux.ie>
35  */
36 
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
40 
41 #include <xf86drm.h>
42 #include <xf86atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/stat.h>
52 #include <sys/types.h>
53 #include <stdbool.h>
54 
55 #include "errno.h"
56 #ifndef ETIME
57 #define ETIME ETIMEDOUT
58 #endif
59 #include "libdrm_macros.h"
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
64 #include "string.h"
65 
66 #include "i915_drm.h"
67 #include "uthash.h"
68 
69 #ifdef HAVE_VALGRIND
70 #include <valgrind.h>
71 #include <memcheck.h>
72 #define VG(x) x
73 #else
74 #define VG(x)
75 #endif
76 
77 #define memclear(s) memset(&s, 0, sizeof(s))
78 
79 #define DBG(...) do {					\
80 	if (bufmgr_gem->bufmgr.debug)			\
81 		fprintf(stderr, __VA_ARGS__);		\
82 } while (0)
83 
84 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
85 #define MAX2(A, B) ((A) > (B) ? (A) : (B))
86 
87 /**
88  * upper_32_bits - return bits 32-63 of a number
89  * @n: the number we're accessing
90  *
91  * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
92  * the "right shift count >= width of type" warning when that quantity is
93  * 32-bits.
94  */
95 #define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
96 
97 /**
98  * lower_32_bits - return bits 0-31 of a number
99  * @n: the number we're accessing
100  */
101 #define lower_32_bits(n) ((__u32)(n))
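
/*
 * Usage sketch (editorial addition, not part of the upstream file): these
 * macros split a 64-bit presumed offset into the two 32-bit halves that the
 * DBG output in drm_intel_gem_dump_validation_list() prints.  The value
 * below is illustrative.
 *
 *	uint64_t offset64 = 0x0000000123456000ull;
 *	assert(upper_32_bits(offset64) == 0x00000001);
 *	assert(lower_32_bits(offset64) == 0x23456000);
 */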
102 
103 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
104 
105 struct drm_intel_gem_bo_bucket {
106 	drmMMListHead head;
107 	unsigned long size;
108 };
109 
110 typedef struct _drm_intel_bufmgr_gem {
111 	drm_intel_bufmgr bufmgr;
112 
113 	atomic_t refcount;
114 
115 	int fd;
116 
117 	int max_relocs;
118 
119 	pthread_mutex_t lock;
120 
121 	struct drm_i915_gem_exec_object *exec_objects;
122 	struct drm_i915_gem_exec_object2 *exec2_objects;
123 	drm_intel_bo **exec_bos;
124 	int exec_size;
125 	int exec_count;
126 
127 	/** Array of lists of cached gem objects of power-of-two sizes */
128 	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
129 	int num_buckets;
130 	time_t time;
131 
132 	drmMMListHead managers;
133 
134 	drm_intel_bo_gem *name_table;
135 	drm_intel_bo_gem *handle_table;
136 
137 	drmMMListHead vma_cache;
138 	int vma_count, vma_open, vma_max;
139 
140 	uint64_t gtt_size;
141 	int available_fences;
142 	int pci_device;
143 	int gen;
144 	unsigned int has_bsd : 1;
145 	unsigned int has_blt : 1;
146 	unsigned int has_relaxed_fencing : 1;
147 	unsigned int has_llc : 1;
148 	unsigned int has_wait_timeout : 1;
149 	unsigned int bo_reuse : 1;
150 	unsigned int no_exec : 1;
151 	unsigned int has_vebox : 1;
152 	unsigned int has_exec_async : 1;
153 	bool fenced_relocs;
154 
155 	struct {
156 		void *ptr;
157 		uint32_t handle;
158 	} userptr_active;
159 
160 } drm_intel_bufmgr_gem;
161 
162 #define DRM_INTEL_RELOC_FENCE (1<<0)
163 
164 typedef struct _drm_intel_reloc_target_info {
165 	drm_intel_bo *bo;
166 	int flags;
167 } drm_intel_reloc_target;
168 
169 struct _drm_intel_bo_gem {
170 	drm_intel_bo bo;
171 
172 	atomic_t refcount;
173 	uint32_t gem_handle;
174 	const char *name;
175 
176 	/**
177 	 * Kernel-assigned global name for this object
178          *
179          * List contains both flink named and prime fd'd objects
180 	 */
181 	unsigned int global_name;
182 
183 	UT_hash_handle handle_hh;
184 	UT_hash_handle name_hh;
185 
186 	/**
187 	 * Index of the buffer within the validation list while preparing a
188 	 * batchbuffer execution.
189 	 */
190 	int validate_index;
191 
192 	/**
193 	 * Current tiling mode
194 	 */
195 	uint32_t tiling_mode;
196 	uint32_t swizzle_mode;
197 	unsigned long stride;
198 
199 	unsigned long kflags;
200 
201 	time_t free_time;
202 
203 	/** Array passed to the DRM containing relocation information. */
204 	struct drm_i915_gem_relocation_entry *relocs;
205 	/**
206 	 * Array of info structs corresponding to relocs[i].target_handle etc
207 	 */
208 	drm_intel_reloc_target *reloc_target_info;
209 	/** Number of entries in relocs */
210 	int reloc_count;
211 	/** Array of BOs that are referenced by this buffer and will be softpinned */
212 	drm_intel_bo **softpin_target;
213 	/** Number of softpinned BOs that are referenced by this buffer */
214 	int softpin_target_count;
215 	/** Maximum number of softpinned BOs that are referenced by this buffer */
216 	int softpin_target_size;
217 
218 	/** Mapped address for the buffer, saved across map/unmap cycles */
219 	void *mem_virtual;
220 	/** GTT virtual address for the buffer, saved across map/unmap cycles */
221 	void *gtt_virtual;
222 	/** WC CPU address for the buffer, saved across map/unmap cycles */
223 	void *wc_virtual;
224 	/**
225 	 * Virtual address of the buffer allocated by user, used for userptr
226 	 * objects only.
227 	 */
228 	void *user_virtual;
229 	int map_count;
230 	drmMMListHead vma_list;
231 
232 	/** BO cache list */
233 	drmMMListHead head;
234 
235 	/**
236 	 * Boolean of whether this BO and its children have been included in
237 	 * the current drm_intel_bufmgr_check_aperture_space() total.
238 	 */
239 	bool included_in_check_aperture;
240 
241 	/**
242 	 * Boolean of whether this buffer has been used as a relocation
243 	 * target and had its size accounted for, and thus can't have any
244 	 * further relocations added to it.
245 	 */
246 	bool used_as_reloc_target;
247 
248 	/**
249 	 * Boolean of whether we have encountered an error whilst building the relocation tree.
250 	 */
251 	bool has_error;
252 
253 	/**
254 	 * Boolean of whether this buffer can be re-used
255 	 */
256 	bool reusable;
257 
258 	/**
259 	 * Boolean of whether the GPU is definitely not accessing the buffer.
260 	 *
261 	 * This is only valid when reusable, since non-reusable
262 	 * buffers are those that have been shared with other
263 	 * processes, so we don't know their state.
264 	 */
265 	bool idle;
266 
267 	/**
268 	 * Boolean of whether this buffer was allocated with userptr
269 	 */
270 	bool is_userptr;
271 
272 	/**
273 	 * Boolean of whether this buffer can be placed in the full 48-bit
274 	 * address range on gen8+.
275 	 *
276 	 * By default, buffers will be kept in a 32-bit range, unless this
277 	 * flag is explicitly set.
278 	 */
279 	bool use_48b_address_range;
280 
281 	/**
282 	 * Whether this buffer is softpinned at the offset specified by the user
283 	 */
284 	bool is_softpin;
285 
286 	/**
287 	 * Size in bytes of this buffer and its relocation descendants.
288 	 *
289 	 * Used to avoid costly tree walking in
290 	 * drm_intel_bufmgr_check_aperture in the common case.
291 	 */
292 	int reloc_tree_size;
293 
294 	/**
295 	 * Number of potential fence registers required by this buffer and its
296 	 * relocations.
297 	 */
298 	int reloc_tree_fences;
299 
300 	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
301 	bool mapped_cpu_write;
302 };
303 
304 static unsigned int
305 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
306 
307 static unsigned int
308 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
309 
310 static int
311 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
312 			    uint32_t * swizzle_mode);
313 
314 static int
315 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
316 				     uint32_t tiling_mode,
317 				     uint32_t stride);
318 
319 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
320 						      time_t time);
321 
322 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
323 
324 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
325 
326 static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
327 {
328         return (drm_intel_bo_gem *)bo;
329 }
330 
331 static unsigned long
332 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
333 			   uint32_t *tiling_mode)
334 {
335 	unsigned long min_size, max_size;
336 	unsigned long i;
337 
338 	if (*tiling_mode == I915_TILING_NONE)
339 		return size;
340 
341 	/* 965+ just need multiples of page size for tiling */
342 	if (bufmgr_gem->gen >= 4)
343 		return ROUND_UP_TO(size, 4096);
344 
345 	/* Older chips need powers of two, of at least 512k or 1M */
346 	if (bufmgr_gem->gen == 3) {
347 		min_size = 1024*1024;
348 		max_size = 128*1024*1024;
349 	} else {
350 		min_size = 512*1024;
351 		max_size = 64*1024*1024;
352 	}
353 
354 	if (size > max_size) {
355 		*tiling_mode = I915_TILING_NONE;
356 		return size;
357 	}
358 
359 	/* Do we need to allocate every page for the fence? */
360 	if (bufmgr_gem->has_relaxed_fencing)
361 		return ROUND_UP_TO(size, 4096);
362 
363 	for (i = min_size; i < size; i <<= 1)
364 		;
365 
366 	return i;
367 }
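
/*
 * Worked example (editorial sketch): on a gen2 part without relaxed fencing,
 * a 600 KiB X-tiled request falls between the 512 KiB minimum and the next
 * power of two, so the loop above rounds it up to 1 MiB.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long sz = drm_intel_gem_bo_tile_size(bufmgr_gem,
 *						      600 * 1024, &tiling);
 *
 * With bufmgr_gem->gen == 2 and !has_relaxed_fencing this yields
 * sz == 1024 * 1024, while gen >= 4 would return 600 * 1024 unchanged,
 * since that is already a page multiple.
 */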
368 
369 /*
370  * Round a given pitch up to the minimum required for X tiling on a
371  * given chip.  We use 512 as the minimum to allow for a later tiling
372  * change.
373  */
374 static unsigned long
375 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
376 			    unsigned long pitch, uint32_t *tiling_mode)
377 {
378 	unsigned long tile_width;
379 	unsigned long i;
380 
381 	/* If untiled, then just align it so that we can do rendering
382 	 * to it with the 3D engine.
383 	 */
384 	if (*tiling_mode == I915_TILING_NONE)
385 		return ALIGN(pitch, 64);
386 
387 	if (*tiling_mode == I915_TILING_X
388 			|| (IS_915(bufmgr_gem->pci_device)
389 			    && *tiling_mode == I915_TILING_Y))
390 		tile_width = 512;
391 	else
392 		tile_width = 128;
393 
394 	/* 965 is flexible */
395 	if (bufmgr_gem->gen >= 4)
396 		return ROUND_UP_TO(pitch, tile_width);
397 
398 	/* The older hardware has a maximum pitch of 8192 with tiled
399 	 * surfaces, so fallback to untiled if it's too large.
400 	 */
401 	if (pitch > 8192) {
402 		*tiling_mode = I915_TILING_NONE;
403 		return ALIGN(pitch, 64);
404 	}
405 
406 	/* Pre-965 needs power of two tile width */
407 	for (i = tile_width; i < pitch; i <<= 1)
408 		;
409 
410 	return i;
411 }
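
/*
 * Worked example (editorial sketch): a 1000-pixel-wide, 4-byte-per-pixel
 * X-tiled surface asks for a 4000-byte pitch.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch = drm_intel_gem_bo_tile_pitch(bufmgr_gem,
 *							  1000 * 4, &tiling);
 *
 * Pre-965 the power-of-two walk above returns 4096; gen4+ also returns 4096
 * here (4000 rounded up to the 512-byte tile width), whereas an untiled
 * request would only be aligned to 64 bytes, i.e. 4032.
 */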
412 
413 static struct drm_intel_gem_bo_bucket *
414 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
415 				 unsigned long size)
416 {
417 	int i;
418 
419 	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
420 		struct drm_intel_gem_bo_bucket *bucket =
421 		    &bufmgr_gem->cache_bucket[i];
422 		if (bucket->size >= size) {
423 			return bucket;
424 		}
425 	}
426 
427 	return NULL;
428 }
429 
430 static void
431 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
432 {
433 	int i, j;
434 
435 	for (i = 0; i < bufmgr_gem->exec_count; i++) {
436 		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
437 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
438 
439 		if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
440 			DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
441 			    bo_gem->is_softpin ? "*" : "",
442 			    bo_gem->name);
443 			continue;
444 		}
445 
446 		for (j = 0; j < bo_gem->reloc_count; j++) {
447 			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
448 			drm_intel_bo_gem *target_gem =
449 			    (drm_intel_bo_gem *) target_bo;
450 
451 			DBG("%2d: %d %s(%s)@0x%08x %08x -> "
452 			    "%d (%s)@0x%08x %08x + 0x%08x\n",
453 			    i,
454 			    bo_gem->gem_handle,
455 			    bo_gem->is_softpin ? "*" : "",
456 			    bo_gem->name,
457 			    upper_32_bits(bo_gem->relocs[j].offset),
458 			    lower_32_bits(bo_gem->relocs[j].offset),
459 			    target_gem->gem_handle,
460 			    target_gem->name,
461 			    upper_32_bits(target_bo->offset64),
462 			    lower_32_bits(target_bo->offset64),
463 			    bo_gem->relocs[j].delta);
464 		}
465 
466 		for (j = 0; j < bo_gem->softpin_target_count; j++) {
467 			drm_intel_bo *target_bo = bo_gem->softpin_target[j];
468 			drm_intel_bo_gem *target_gem =
469 			    (drm_intel_bo_gem *) target_bo;
470 			DBG("%2d: %d %s(%s) -> "
471 			    "%d *(%s)@0x%08x %08x\n",
472 			    i,
473 			    bo_gem->gem_handle,
474 			    bo_gem->is_softpin ? "*" : "",
475 			    bo_gem->name,
476 			    target_gem->gem_handle,
477 			    target_gem->name,
478 			    upper_32_bits(target_bo->offset64),
479 			    lower_32_bits(target_bo->offset64));
480 		}
481 	}
482 }
483 
484 static inline void
485 drm_intel_gem_bo_reference(drm_intel_bo *bo)
486 {
487 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
488 
489 	atomic_inc(&bo_gem->refcount);
490 }
491 
492 /**
493  * Adds the given buffer to the list of buffers to be validated (moved into the
494  * appropriate memory type) with the next batch submission.
495  *
496  * If a buffer is validated multiple times in a batch submission, it ends up
497  * with the intersection of the memory type flags and the union of the
498  * access flags.
499  */
500 static void
501 drm_intel_add_validate_buffer(drm_intel_bo *bo)
502 {
503 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
504 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
505 	int index;
506 
507 	if (bo_gem->validate_index != -1)
508 		return;
509 
510 	/* Extend the array of validation entries as necessary. */
511 	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
512 		int new_size = bufmgr_gem->exec_size * 2;
513 
514 		if (new_size == 0)
515 			new_size = 5;
516 
517 		bufmgr_gem->exec_objects =
518 		    realloc(bufmgr_gem->exec_objects,
519 			    sizeof(*bufmgr_gem->exec_objects) * new_size);
520 		bufmgr_gem->exec_bos =
521 		    realloc(bufmgr_gem->exec_bos,
522 			    sizeof(*bufmgr_gem->exec_bos) * new_size);
523 		bufmgr_gem->exec_size = new_size;
524 	}
525 
526 	index = bufmgr_gem->exec_count;
527 	bo_gem->validate_index = index;
528 	/* Fill in array entry */
529 	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
530 	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
531 	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
532 	bufmgr_gem->exec_objects[index].alignment = bo->align;
533 	bufmgr_gem->exec_objects[index].offset = 0;
534 	bufmgr_gem->exec_bos[index] = bo;
535 	bufmgr_gem->exec_count++;
536 }
537 
538 static void
539 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
540 {
541 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
542 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
543 	int index;
544 	int flags = 0;
545 
546 	if (need_fence)
547 		flags |= EXEC_OBJECT_NEEDS_FENCE;
548 	if (bo_gem->use_48b_address_range)
549 		flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
550 	if (bo_gem->is_softpin)
551 		flags |= EXEC_OBJECT_PINNED;
552 
553 	if (bo_gem->validate_index != -1) {
554 		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
555 		return;
556 	}
557 
558 	/* Extend the array of validation entries as necessary. */
559 	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
560 		int new_size = bufmgr_gem->exec_size * 2;
561 
562 		if (new_size == 0)
563 			new_size = 5;
564 
565 		bufmgr_gem->exec2_objects =
566 			realloc(bufmgr_gem->exec2_objects,
567 				sizeof(*bufmgr_gem->exec2_objects) * new_size);
568 		bufmgr_gem->exec_bos =
569 			realloc(bufmgr_gem->exec_bos,
570 				sizeof(*bufmgr_gem->exec_bos) * new_size);
571 		bufmgr_gem->exec_size = new_size;
572 	}
573 
574 	index = bufmgr_gem->exec_count;
575 	bo_gem->validate_index = index;
576 	/* Fill in array entry */
577 	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
578 	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
579 	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
580 	bufmgr_gem->exec2_objects[index].alignment = bo->align;
581 	bufmgr_gem->exec2_objects[index].offset = bo->offset64;
582 	bufmgr_gem->exec2_objects[index].flags = flags | bo_gem->kflags;
583 	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
584 	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
585 	bufmgr_gem->exec_bos[index] = bo;
586 	bufmgr_gem->exec_count++;
587 }
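
/*
 * Editorial note: the execbuffer2 flags built above are the union of every
 * request made for a BO within the current batch.  A hypothetical buffer
 * that is both softpinned and allowed into the full 48-bit PPGTT ends up
 * with
 *
 *	flags == EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS
 *
 * and its exec2_objects[].offset carries the caller-chosen bo->offset64,
 * which the kernel then honours instead of relocating the buffer.
 */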
588 
589 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
590 	sizeof(uint32_t))
591 
592 static void
593 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
594 				      drm_intel_bo_gem *bo_gem,
595 				      unsigned int alignment)
596 {
597 	unsigned int size;
598 
599 	assert(!bo_gem->used_as_reloc_target);
600 
601 	/* The older chipsets are far less flexible in terms of tiling,
602 	 * and require tiled buffers to be size-aligned in the aperture.
603 	 * This means that in the worst possible case we will need a hole
604 	 * twice as large as the object in order for it to fit into the
605 	 * aperture. Optimal packing is for wimps.
606 	 */
607 	size = bo_gem->bo.size;
608 	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
609 		unsigned int min_size;
610 
611 		if (bufmgr_gem->has_relaxed_fencing) {
612 			if (bufmgr_gem->gen == 3)
613 				min_size = 1024*1024;
614 			else
615 				min_size = 512*1024;
616 
617 			while (min_size < size)
618 				min_size *= 2;
619 		} else
620 			min_size = size;
621 
622 		/* Account for worst-case alignment. */
623 		alignment = MAX2(alignment, min_size);
624 	}
625 
626 	bo_gem->reloc_tree_size = size + alignment;
627 }
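
/*
 * Worked example (editorial sketch): a 1.5 MiB X-tiled buffer on gen3 with
 * relaxed fencing gets min_size = 1 MiB doubled to 2 MiB, so
 * reloc_tree_size = 1.5 MiB + 2 MiB = 3.5 MiB.  Without relaxed fencing
 * min_size equals the object size and the estimate is exactly twice the
 * buffer, matching the "hole twice as large" comment above.
 */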
628 
629 static int
630 drm_intel_setup_reloc_list(drm_intel_bo *bo)
631 {
632 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
633 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
634 	unsigned int max_relocs = bufmgr_gem->max_relocs;
635 
636 	if (bo->size / 4 < max_relocs)
637 		max_relocs = bo->size / 4;
638 
639 	bo_gem->relocs = malloc(max_relocs *
640 				sizeof(struct drm_i915_gem_relocation_entry));
641 	bo_gem->reloc_target_info = malloc(max_relocs *
642 					   sizeof(drm_intel_reloc_target));
643 	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
644 		bo_gem->has_error = true;
645 
646 		free (bo_gem->relocs);
647 		bo_gem->relocs = NULL;
648 
649 		free (bo_gem->reloc_target_info);
650 		bo_gem->reloc_target_info = NULL;
651 
652 		return 1;
653 	}
654 
655 	return 0;
656 }
657 
658 static int
659 drm_intel_gem_bo_busy(drm_intel_bo *bo)
660 {
661 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
662 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
663 	struct drm_i915_gem_busy busy;
664 	int ret;
665 
666 	if (bo_gem->reusable && bo_gem->idle)
667 		return false;
668 
669 	memclear(busy);
670 	busy.handle = bo_gem->gem_handle;
671 
672 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
673 	if (ret == 0) {
674 		bo_gem->idle = !busy.busy;
675 		return busy.busy;
676 	} else {
677 		return false;
678 	}
679 	return (ret == 0 && busy.busy);
680 }
681 
682 static int
683 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
684 				  drm_intel_bo_gem *bo_gem, int state)
685 {
686 	struct drm_i915_gem_madvise madv;
687 
688 	memclear(madv);
689 	madv.handle = bo_gem->gem_handle;
690 	madv.madv = state;
691 	madv.retained = 1;
692 	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
693 
694 	return madv.retained;
695 }
696 
697 static int
698 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
699 {
700 	return drm_intel_gem_bo_madvise_internal
701 		((drm_intel_bufmgr_gem *) bo->bufmgr,
702 		 (drm_intel_bo_gem *) bo,
703 		 madv);
704 }
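
/*
 * Usage sketch (editorial): the cache code below keys off the 'retained'
 * value returned here.  A hypothetical caller can mark an idle buffer
 * purgeable via the public drm_intel_bo_madvise() wrapper and later discover
 * whether the kernel reclaimed its pages (repopulate() stands in for
 * whatever refills the buffer):
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *	...
 *	if (!drm_intel_bo_madvise(bo, I915_MADV_WILLNEED))
 *		repopulate(bo);		(backing pages were purged)
 */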
705 
706 /* drop the oldest entries that have been purged by the kernel */
707 static void
708 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
709 				    struct drm_intel_gem_bo_bucket *bucket)
710 {
711 	while (!DRMLISTEMPTY(&bucket->head)) {
712 		drm_intel_bo_gem *bo_gem;
713 
714 		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
715 				      bucket->head.next, head);
716 		if (drm_intel_gem_bo_madvise_internal
717 		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
718 			break;
719 
720 		DRMLISTDEL(&bo_gem->head);
721 		drm_intel_gem_bo_free(&bo_gem->bo);
722 	}
723 }
724 
725 static drm_intel_bo *
726 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
727 				const char *name,
728 				unsigned long size,
729 				unsigned long flags,
730 				uint32_t tiling_mode,
731 				unsigned long stride,
732 				unsigned int alignment)
733 {
734 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
735 	drm_intel_bo_gem *bo_gem;
736 	unsigned int page_size = getpagesize();
737 	int ret;
738 	struct drm_intel_gem_bo_bucket *bucket;
739 	bool alloc_from_cache;
740 	unsigned long bo_size;
741 	bool for_render = false;
742 
743 	if (flags & BO_ALLOC_FOR_RENDER)
744 		for_render = true;
745 
746 	/* Round the allocated size up to a power of two number of pages. */
747 	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
748 
749 	/* If we don't have caching at this size, don't actually round the
750 	 * allocation up.
751 	 */
752 	if (bucket == NULL) {
753 		bo_size = size;
754 		if (bo_size < page_size)
755 			bo_size = page_size;
756 	} else {
757 		bo_size = bucket->size;
758 	}
759 
760 	pthread_mutex_lock(&bufmgr_gem->lock);
761 	/* Get a buffer out of the cache if available */
762 retry:
763 	alloc_from_cache = false;
764 	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
765 		if (for_render) {
766 			/* Allocate new render-target BOs from the tail (MRU)
767 			 * of the list, as it will likely be hot in the GPU
768 			 * cache and in the aperture for us.
769 			 */
770 			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
771 					      bucket->head.prev, head);
772 			DRMLISTDEL(&bo_gem->head);
773 			alloc_from_cache = true;
774 			bo_gem->bo.align = alignment;
775 		} else {
776 			assert(alignment == 0);
777 			/* For non-render-target BOs (where we're probably
778 			 * going to map it first thing in order to fill it
779 			 * with data), check if the last BO in the cache is
780 			 * unbusy, and only reuse in that case. Otherwise,
781 			 * allocating a new buffer is probably faster than
782 			 * waiting for the GPU to finish.
783 			 */
784 			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
785 					      bucket->head.next, head);
786 			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
787 				alloc_from_cache = true;
788 				DRMLISTDEL(&bo_gem->head);
789 			}
790 		}
791 
792 		if (alloc_from_cache) {
793 			if (!drm_intel_gem_bo_madvise_internal
794 			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
795 				drm_intel_gem_bo_free(&bo_gem->bo);
796 				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
797 								    bucket);
798 				goto retry;
799 			}
800 
801 			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
802 								 tiling_mode,
803 								 stride)) {
804 				drm_intel_gem_bo_free(&bo_gem->bo);
805 				goto retry;
806 			}
807 		}
808 	}
809 
810 	if (!alloc_from_cache) {
811 		struct drm_i915_gem_create create;
812 
813 		bo_gem = calloc(1, sizeof(*bo_gem));
814 		if (!bo_gem)
815 			goto err;
816 
817 		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
818 		   list (vma_list), so better set the list head here */
819 		DRMINITLISTHEAD(&bo_gem->vma_list);
820 
821 		bo_gem->bo.size = bo_size;
822 
823 		memclear(create);
824 		create.size = bo_size;
825 
826 		ret = drmIoctl(bufmgr_gem->fd,
827 			       DRM_IOCTL_I915_GEM_CREATE,
828 			       &create);
829 		if (ret != 0) {
830 			free(bo_gem);
831 			goto err;
832 		}
833 
834 		bo_gem->gem_handle = create.handle;
835 		bo_gem->bo.handle = bo_gem->gem_handle;
836 		bo_gem->bo.bufmgr = bufmgr;
837 		bo_gem->bo.align = alignment;
838 
839 		bo_gem->tiling_mode = I915_TILING_NONE;
840 		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
841 		bo_gem->stride = 0;
842 
843 		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
844 							 tiling_mode,
845 							 stride))
846 			goto err_free;
847 
848 		HASH_ADD(handle_hh, bufmgr_gem->handle_table,
849 			 gem_handle, sizeof(bo_gem->gem_handle),
850 			 bo_gem);
851 	}
852 
853 	bo_gem->name = name;
854 	atomic_set(&bo_gem->refcount, 1);
855 	bo_gem->validate_index = -1;
856 	bo_gem->reloc_tree_fences = 0;
857 	bo_gem->used_as_reloc_target = false;
858 	bo_gem->has_error = false;
859 	bo_gem->reusable = true;
860 	bo_gem->use_48b_address_range = false;
861 
862 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
863 	pthread_mutex_unlock(&bufmgr_gem->lock);
864 
865 	DBG("bo_create: buf %d (%s) %ldb\n",
866 	    bo_gem->gem_handle, bo_gem->name, size);
867 
868 	return &bo_gem->bo;
869 
870 err_free:
871 	drm_intel_gem_bo_free(&bo_gem->bo);
872 err:
873 	pthread_mutex_unlock(&bufmgr_gem->lock);
874 	return NULL;
875 }
876 
877 static drm_intel_bo *
878 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
879 				  const char *name,
880 				  unsigned long size,
881 				  unsigned int alignment)
882 {
883 	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
884 					       BO_ALLOC_FOR_RENDER,
885 					       I915_TILING_NONE, 0,
886 					       alignment);
887 }
888 
889 static drm_intel_bo *
890 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
891 		       const char *name,
892 		       unsigned long size,
893 		       unsigned int alignment)
894 {
895 	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
896 					       I915_TILING_NONE, 0, 0);
897 }
898 
899 static drm_intel_bo *
900 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
901 			     int x, int y, int cpp, uint32_t *tiling_mode,
902 			     unsigned long *pitch, unsigned long flags)
903 {
904 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
905 	unsigned long size, stride;
906 	uint32_t tiling;
907 
908 	do {
909 		unsigned long aligned_y, height_alignment;
910 
911 		tiling = *tiling_mode;
912 
913 		/* If we're tiled, our allocations are in 8 or 32-row blocks,
914 		 * so failure to align our height means that we won't allocate
915 		 * enough pages.
916 		 *
917 		 * If we're untiled, we still have to align to 2 rows high
918 		 * because the data port accesses 2x2 blocks even if the
919 		 * bottom row isn't to be rendered, so failure to align means
920 		 * we could walk off the end of the GTT and fault.  This is
921 		 * documented on 965, and may be the case on older chipsets
922 		 * too so we try to be careful.
923 		 */
924 		aligned_y = y;
925 		height_alignment = 2;
926 
927 		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
928 			height_alignment = 16;
929 		else if (tiling == I915_TILING_X
930 			|| (IS_915(bufmgr_gem->pci_device)
931 			    && tiling == I915_TILING_Y))
932 			height_alignment = 8;
933 		else if (tiling == I915_TILING_Y)
934 			height_alignment = 32;
935 		aligned_y = ALIGN(y, height_alignment);
936 
937 		stride = x * cpp;
938 		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
939 		size = stride * aligned_y;
940 		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
941 	} while (*tiling_mode != tiling);
942 	*pitch = stride;
943 
944 	if (tiling == I915_TILING_NONE)
945 		stride = 0;
946 
947 	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
948 					       tiling, stride, 0);
949 }
950 
951 static drm_intel_bo *
952 drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
953 				const char *name,
954 				void *addr,
955 				uint32_t tiling_mode,
956 				uint32_t stride,
957 				unsigned long size,
958 				unsigned long flags)
959 {
960 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
961 	drm_intel_bo_gem *bo_gem;
962 	int ret;
963 	struct drm_i915_gem_userptr userptr;
964 
965 	/* Tiling with userptr surfaces is not supported
966 	 * on all hardware so refuse it for time being.
967 	 */
968 	if (tiling_mode != I915_TILING_NONE)
969 		return NULL;
970 
971 	bo_gem = calloc(1, sizeof(*bo_gem));
972 	if (!bo_gem)
973 		return NULL;
974 
975 	atomic_set(&bo_gem->refcount, 1);
976 	DRMINITLISTHEAD(&bo_gem->vma_list);
977 
978 	bo_gem->bo.size = size;
979 
980 	memclear(userptr);
981 	userptr.user_ptr = (__u64)((unsigned long)addr);
982 	userptr.user_size = size;
983 	userptr.flags = flags;
984 
985 	ret = drmIoctl(bufmgr_gem->fd,
986 			DRM_IOCTL_I915_GEM_USERPTR,
987 			&userptr);
988 	if (ret != 0) {
989 		DBG("bo_create_userptr: "
990 		    "ioctl failed with user ptr %p size 0x%lx, "
991 		    "user flags 0x%lx\n", addr, size, flags);
992 		free(bo_gem);
993 		return NULL;
994 	}
995 
996 	pthread_mutex_lock(&bufmgr_gem->lock);
997 
998 	bo_gem->gem_handle = userptr.handle;
999 	bo_gem->bo.handle = bo_gem->gem_handle;
1000 	bo_gem->bo.bufmgr    = bufmgr;
1001 	bo_gem->is_userptr   = true;
1002 	bo_gem->bo.virtual   = addr;
1003 	/* Save the address provided by user */
1004 	bo_gem->user_virtual = addr;
1005 	bo_gem->tiling_mode  = I915_TILING_NONE;
1006 	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
1007 	bo_gem->stride       = 0;
1008 
1009 	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1010 		 gem_handle, sizeof(bo_gem->gem_handle),
1011 		 bo_gem);
1012 
1013 	bo_gem->name = name;
1014 	bo_gem->validate_index = -1;
1015 	bo_gem->reloc_tree_fences = 0;
1016 	bo_gem->used_as_reloc_target = false;
1017 	bo_gem->has_error = false;
1018 	bo_gem->reusable = false;
1019 	bo_gem->use_48b_address_range = false;
1020 
1021 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1022 	pthread_mutex_unlock(&bufmgr_gem->lock);
1023 
1024 	DBG("bo_create_userptr: "
1025 	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
1026 		addr, bo_gem->gem_handle, bo_gem->name,
1027 		size, stride, tiling_mode);
1028 
1029 	return &bo_gem->bo;
1030 }
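
/*
 * Usage sketch (editorial): wrapping an existing page-aligned allocation
 * through the public drm_intel_bo_alloc_userptr() entry point, assuming the
 * userptr probe below succeeds.  Name and size are illustrative; tiling
 * must be I915_TILING_NONE, as enforced above.
 *
 *	size_t sz = 4 * 4096;
 *	void *ptr;
 *	if (posix_memalign(&ptr, 4096, sz) == 0) {
 *		drm_intel_bo *bo = drm_intel_bo_alloc_userptr(bufmgr, "wrap",
 *				ptr, I915_TILING_NONE, 0, sz, 0);
 *		(bo->virtual already points at ptr; no map ioctl is needed)
 *	}
 */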
1031 
1032 static bool
1033 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
1034 {
1035 	int ret;
1036 	void *ptr;
1037 	long pgsz;
1038 	struct drm_i915_gem_userptr userptr;
1039 
1040 	pgsz = sysconf(_SC_PAGESIZE);
1041 	assert(pgsz > 0);
1042 
1043 	ret = posix_memalign(&ptr, pgsz, pgsz);
1044 	if (ret) {
1045 		DBG("Failed to get a page (%ld) for userptr detection!\n",
1046 			pgsz);
1047 		return false;
1048 	}
1049 
1050 	memclear(userptr);
1051 	userptr.user_ptr = (__u64)(unsigned long)ptr;
1052 	userptr.user_size = pgsz;
1053 
1054 retry:
1055 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
1056 	if (ret) {
1057 		if (errno == ENODEV && userptr.flags == 0) {
1058 			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
1059 			goto retry;
1060 		}
1061 		free(ptr);
1062 		return false;
1063 	}
1064 
1065 	/* We don't release the userptr bo here as we want to keep the
1066 	 * kernel mm tracking alive for our lifetime. The first time we
1067 	 * create a userptr object the kernel has to install an mmu_notifier
1068 	 * which is a heavyweight operation (e.g. it requires taking all
1069 	 * mm_locks and stop_machine()).
1070 	 */
1071 
1072 	bufmgr_gem->userptr_active.ptr = ptr;
1073 	bufmgr_gem->userptr_active.handle = userptr.handle;
1074 
1075 	return true;
1076 }
1077 
1078 static drm_intel_bo *
1079 check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
1080 		       const char *name,
1081 		       void *addr,
1082 		       uint32_t tiling_mode,
1083 		       uint32_t stride,
1084 		       unsigned long size,
1085 		       unsigned long flags)
1086 {
1087 	if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
1088 		bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
1089 	else
1090 		bufmgr->bo_alloc_userptr = NULL;
1091 
1092 	return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
1093 					  tiling_mode, stride, size, flags);
1094 }
1095 
1096 /**
1097  * Returns a drm_intel_bo wrapping the given buffer object handle.
1098  *
1099  * This can be used when one application needs to pass a buffer object
1100  * to another.
1101  */
1102 drm_intel_bo *
1103 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1104 				  const char *name,
1105 				  unsigned int handle)
1106 {
1107 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1108 	drm_intel_bo_gem *bo_gem;
1109 	int ret;
1110 	struct drm_gem_open open_arg;
1111 	struct drm_i915_gem_get_tiling get_tiling;
1112 
1113 	/* At the moment most applications only have a few named bo.
1114 	 * For instance, in a DRI client only the render buffers passed
1115 	 * between X and the client are named. And since X returns the
1116 	 * alternating names for the front/back buffer a linear search
1117 	 * provides a sufficiently fast match.
1118 	 */
1119 	pthread_mutex_lock(&bufmgr_gem->lock);
1120 	HASH_FIND(name_hh, bufmgr_gem->name_table,
1121 		  &handle, sizeof(handle), bo_gem);
1122 	if (bo_gem) {
1123 		drm_intel_gem_bo_reference(&bo_gem->bo);
1124 		goto out;
1125 	}
1126 
1127 	memclear(open_arg);
1128 	open_arg.name = handle;
1129 	ret = drmIoctl(bufmgr_gem->fd,
1130 		       DRM_IOCTL_GEM_OPEN,
1131 		       &open_arg);
1132 	if (ret != 0) {
1133 		DBG("Couldn't reference %s handle 0x%08x: %s\n",
1134 		    name, handle, strerror(errno));
1135 		bo_gem = NULL;
1136 		goto out;
1137 	}
1138         /* Now see if someone has used a prime handle to get this
1139          * object from the kernel before by looking through the list
1140          * again for a matching gem_handle
1141          */
1142 	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
1143 		  &open_arg.handle, sizeof(open_arg.handle), bo_gem);
1144 	if (bo_gem) {
1145 		drm_intel_gem_bo_reference(&bo_gem->bo);
1146 		goto out;
1147 	}
1148 
1149 	bo_gem = calloc(1, sizeof(*bo_gem));
1150 	if (!bo_gem)
1151 		goto out;
1152 
1153 	atomic_set(&bo_gem->refcount, 1);
1154 	DRMINITLISTHEAD(&bo_gem->vma_list);
1155 
1156 	bo_gem->bo.size = open_arg.size;
1157 	bo_gem->bo.offset = 0;
1158 	bo_gem->bo.offset64 = 0;
1159 	bo_gem->bo.virtual = NULL;
1160 	bo_gem->bo.bufmgr = bufmgr;
1161 	bo_gem->name = name;
1162 	bo_gem->validate_index = -1;
1163 	bo_gem->gem_handle = open_arg.handle;
1164 	bo_gem->bo.handle = open_arg.handle;
1165 	bo_gem->global_name = handle;
1166 	bo_gem->reusable = false;
1167 	bo_gem->use_48b_address_range = false;
1168 
1169 	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1170 		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
1171 	HASH_ADD(name_hh, bufmgr_gem->name_table,
1172 		 global_name, sizeof(bo_gem->global_name), bo_gem);
1173 
1174 	memclear(get_tiling);
1175 	get_tiling.handle = bo_gem->gem_handle;
1176 	ret = drmIoctl(bufmgr_gem->fd,
1177 		       DRM_IOCTL_I915_GEM_GET_TILING,
1178 		       &get_tiling);
1179 	if (ret != 0)
1180 		goto err_unref;
1181 
1182 	bo_gem->tiling_mode = get_tiling.tiling_mode;
1183 	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1184 	/* XXX stride is unknown */
1185 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1186 	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1187 
1188 out:
1189 	pthread_mutex_unlock(&bufmgr_gem->lock);
1190 	return &bo_gem->bo;
1191 
1192 err_unref:
1193 	drm_intel_gem_bo_free(&bo_gem->bo);
1194 	pthread_mutex_unlock(&bufmgr_gem->lock);
1195 	return NULL;
1196 }
1197 
1198 static void
1199 drm_intel_gem_bo_free(drm_intel_bo *bo)
1200 {
1201 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1202 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1203 	struct drm_gem_close close;
1204 	int ret;
1205 
1206 	DRMLISTDEL(&bo_gem->vma_list);
1207 	if (bo_gem->mem_virtual) {
1208 		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1209 		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1210 		bufmgr_gem->vma_count--;
1211 	}
1212 	if (bo_gem->wc_virtual) {
1213 		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
1214 		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1215 		bufmgr_gem->vma_count--;
1216 	}
1217 	if (bo_gem->gtt_virtual) {
1218 		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1219 		bufmgr_gem->vma_count--;
1220 	}
1221 
1222 	if (bo_gem->global_name)
1223 		HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
1224 	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
1225 
1226 	/* Close this object */
1227 	memclear(close);
1228 	close.handle = bo_gem->gem_handle;
1229 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1230 	if (ret != 0) {
1231 		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1232 		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
1233 	}
1234 	free(bo);
1235 }
1236 
1237 static void
1238 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1239 {
1240 #if HAVE_VALGRIND
1241 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1242 
1243 	if (bo_gem->mem_virtual)
1244 		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1245 
1246 	if (bo_gem->wc_virtual)
1247 		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
1248 
1249 	if (bo_gem->gtt_virtual)
1250 		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1251 #endif
1252 }
1253 
1254 /** Frees all cached buffers significantly older than @time. */
1255 static void
1256 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1257 {
1258 	int i;
1259 
1260 	if (bufmgr_gem->time == time)
1261 		return;
1262 
1263 	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1264 		struct drm_intel_gem_bo_bucket *bucket =
1265 		    &bufmgr_gem->cache_bucket[i];
1266 
1267 		while (!DRMLISTEMPTY(&bucket->head)) {
1268 			drm_intel_bo_gem *bo_gem;
1269 
1270 			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1271 					      bucket->head.next, head);
1272 			if (time - bo_gem->free_time <= 1)
1273 				break;
1274 
1275 			DRMLISTDEL(&bo_gem->head);
1276 
1277 			drm_intel_gem_bo_free(&bo_gem->bo);
1278 		}
1279 	}
1280 
1281 	bufmgr_gem->time = time;
1282 }
1283 
1284 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1285 {
1286 	int limit;
1287 
1288 	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1289 	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1290 
1291 	if (bufmgr_gem->vma_max < 0)
1292 		return;
1293 
1294 	/* We may need to evict a few entries in order to create new mmaps */
1295 	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1296 	if (limit < 0)
1297 		limit = 0;
1298 
1299 	while (bufmgr_gem->vma_count > limit) {
1300 		drm_intel_bo_gem *bo_gem;
1301 
1302 		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1303 				      bufmgr_gem->vma_cache.next,
1304 				      vma_list);
1305 		assert(bo_gem->map_count == 0);
1306 		DRMLISTDELINIT(&bo_gem->vma_list);
1307 
1308 		if (bo_gem->mem_virtual) {
1309 			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1310 			bo_gem->mem_virtual = NULL;
1311 			bufmgr_gem->vma_count--;
1312 		}
1313 		if (bo_gem->wc_virtual) {
1314 			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1315 			bo_gem->wc_virtual = NULL;
1316 			bufmgr_gem->vma_count--;
1317 		}
1318 		if (bo_gem->gtt_virtual) {
1319 			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1320 			bo_gem->gtt_virtual = NULL;
1321 			bufmgr_gem->vma_count--;
1322 		}
1323 	}
1324 }
1325 
1326 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1327 				       drm_intel_bo_gem *bo_gem)
1328 {
1329 	bufmgr_gem->vma_open--;
1330 	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1331 	if (bo_gem->mem_virtual)
1332 		bufmgr_gem->vma_count++;
1333 	if (bo_gem->wc_virtual)
1334 		bufmgr_gem->vma_count++;
1335 	if (bo_gem->gtt_virtual)
1336 		bufmgr_gem->vma_count++;
1337 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1338 }
1339 
1340 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1341 				      drm_intel_bo_gem *bo_gem)
1342 {
1343 	bufmgr_gem->vma_open++;
1344 	DRMLISTDEL(&bo_gem->vma_list);
1345 	if (bo_gem->mem_virtual)
1346 		bufmgr_gem->vma_count--;
1347 	if (bo_gem->wc_virtual)
1348 		bufmgr_gem->vma_count--;
1349 	if (bo_gem->gtt_virtual)
1350 		bufmgr_gem->vma_count--;
1351 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1352 }
1353 
1354 static void
1355 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1356 {
1357 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1358 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1359 	struct drm_intel_gem_bo_bucket *bucket;
1360 	int i;
1361 
1362 	/* Unreference all the target buffers */
1363 	for (i = 0; i < bo_gem->reloc_count; i++) {
1364 		if (bo_gem->reloc_target_info[i].bo != bo) {
1365 			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1366 								  reloc_target_info[i].bo,
1367 								  time);
1368 		}
1369 	}
1370 	for (i = 0; i < bo_gem->softpin_target_count; i++)
1371 		drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
1372 								  time);
1373 	bo_gem->kflags = 0;
1374 	bo_gem->reloc_count = 0;
1375 	bo_gem->used_as_reloc_target = false;
1376 	bo_gem->softpin_target_count = 0;
1377 
1378 	DBG("bo_unreference final: %d (%s)\n",
1379 	    bo_gem->gem_handle, bo_gem->name);
1380 
1381 	/* release memory associated with this object */
1382 	if (bo_gem->reloc_target_info) {
1383 		free(bo_gem->reloc_target_info);
1384 		bo_gem->reloc_target_info = NULL;
1385 	}
1386 	if (bo_gem->relocs) {
1387 		free(bo_gem->relocs);
1388 		bo_gem->relocs = NULL;
1389 	}
1390 	if (bo_gem->softpin_target) {
1391 		free(bo_gem->softpin_target);
1392 		bo_gem->softpin_target = NULL;
1393 		bo_gem->softpin_target_size = 0;
1394 	}
1395 
1396 	/* Clear any left-over mappings */
1397 	if (bo_gem->map_count) {
1398 		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1399 		bo_gem->map_count = 0;
1400 		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1401 		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1402 	}
1403 
1404 	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1405 	/* Put the buffer into our internal cache for reuse if we can. */
1406 	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1407 	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1408 					      I915_MADV_DONTNEED)) {
1409 		bo_gem->free_time = time;
1410 
1411 		bo_gem->name = NULL;
1412 		bo_gem->validate_index = -1;
1413 
1414 		bo_gem->kflags = 0;
1415 
1416 		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1417 	} else {
1418 		drm_intel_gem_bo_free(bo);
1419 	}
1420 }
1421 
1422 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1423 						      time_t time)
1424 {
1425 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1426 
1427 	assert(atomic_read(&bo_gem->refcount) > 0);
1428 	if (atomic_dec_and_test(&bo_gem->refcount))
1429 		drm_intel_gem_bo_unreference_final(bo, time);
1430 }
1431 
1432 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1433 {
1434 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1435 
1436 	assert(atomic_read(&bo_gem->refcount) > 0);
1437 
1438 	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1439 		drm_intel_bufmgr_gem *bufmgr_gem =
1440 		    (drm_intel_bufmgr_gem *) bo->bufmgr;
1441 		struct timespec time;
1442 
1443 		clock_gettime(CLOCK_MONOTONIC, &time);
1444 
1445 		pthread_mutex_lock(&bufmgr_gem->lock);
1446 
1447 		if (atomic_dec_and_test(&bo_gem->refcount)) {
1448 			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1449 			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1450 		}
1451 
1452 		pthread_mutex_unlock(&bufmgr_gem->lock);
1453 	}
1454 }
1455 
1456 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1457 {
1458 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1459 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1460 	struct drm_i915_gem_set_domain set_domain;
1461 	int ret;
1462 
1463 	if (bo_gem->is_userptr) {
1464 		/* Return the same user ptr */
1465 		bo->virtual = bo_gem->user_virtual;
1466 		return 0;
1467 	}
1468 
1469 	pthread_mutex_lock(&bufmgr_gem->lock);
1470 
1471 	if (bo_gem->map_count++ == 0)
1472 		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1473 
1474 	if (!bo_gem->mem_virtual) {
1475 		struct drm_i915_gem_mmap mmap_arg;
1476 
1477 		DBG("bo_map: %d (%s), map_count=%d\n",
1478 		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1479 
1480 		memclear(mmap_arg);
1481 		mmap_arg.handle = bo_gem->gem_handle;
1482 		mmap_arg.size = bo->size;
1483 		ret = drmIoctl(bufmgr_gem->fd,
1484 			       DRM_IOCTL_I915_GEM_MMAP,
1485 			       &mmap_arg);
1486 		if (ret != 0) {
1487 			ret = -errno;
1488 			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1489 			    __FILE__, __LINE__, bo_gem->gem_handle,
1490 			    bo_gem->name, strerror(errno));
1491 			if (--bo_gem->map_count == 0)
1492 				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1493 			pthread_mutex_unlock(&bufmgr_gem->lock);
1494 			return ret;
1495 		}
1496 		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1497 		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1498 	}
1499 	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1500 	    bo_gem->mem_virtual);
1501 	bo->virtual = bo_gem->mem_virtual;
1502 
1503 	memclear(set_domain);
1504 	set_domain.handle = bo_gem->gem_handle;
1505 	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1506 	if (write_enable)
1507 		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1508 	else
1509 		set_domain.write_domain = 0;
1510 	ret = drmIoctl(bufmgr_gem->fd,
1511 		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
1512 		       &set_domain);
1513 	if (ret != 0) {
1514 		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1515 		    __FILE__, __LINE__, bo_gem->gem_handle,
1516 		    strerror(errno));
1517 	}
1518 
1519 	if (write_enable)
1520 		bo_gem->mapped_cpu_write = true;
1521 
1522 	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1523 	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1524 	pthread_mutex_unlock(&bufmgr_gem->lock);
1525 
1526 	return 0;
1527 }
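
/*
 * Usage sketch (editorial): a CPU upload through the public
 * drm_intel_bo_map()/drm_intel_bo_unmap() wrappers, which dispatch to the
 * functions in this file.  'data' and 'len' are illustrative.
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {	(1 = write enable)
 *		memcpy(bo->virtual, data, len);
 *		drm_intel_bo_unmap(bo);
 *	}
 */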
1528 
1529 static int
1530 map_gtt(drm_intel_bo *bo)
1531 {
1532 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1533 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1534 	int ret;
1535 
1536 	if (bo_gem->is_userptr)
1537 		return -EINVAL;
1538 
1539 	if (bo_gem->map_count++ == 0)
1540 		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1541 
1542 	/* Get a mapping of the buffer if we haven't before. */
1543 	if (bo_gem->gtt_virtual == NULL) {
1544 		struct drm_i915_gem_mmap_gtt mmap_arg;
1545 
1546 		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1547 		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1548 
1549 		memclear(mmap_arg);
1550 		mmap_arg.handle = bo_gem->gem_handle;
1551 
1552 		/* Get the fake offset back... */
1553 		ret = drmIoctl(bufmgr_gem->fd,
1554 			       DRM_IOCTL_I915_GEM_MMAP_GTT,
1555 			       &mmap_arg);
1556 		if (ret != 0) {
1557 			ret = -errno;
1558 			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1559 			    __FILE__, __LINE__,
1560 			    bo_gem->gem_handle, bo_gem->name,
1561 			    strerror(errno));
1562 			if (--bo_gem->map_count == 0)
1563 				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1564 			return ret;
1565 		}
1566 
1567 		/* and mmap it */
1568 		bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1569 					       MAP_SHARED, bufmgr_gem->fd,
1570 					       mmap_arg.offset);
1571 		if (bo_gem->gtt_virtual == MAP_FAILED) {
1572 			bo_gem->gtt_virtual = NULL;
1573 			ret = -errno;
1574 			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1575 			    __FILE__, __LINE__,
1576 			    bo_gem->gem_handle, bo_gem->name,
1577 			    strerror(errno));
1578 			if (--bo_gem->map_count == 0)
1579 				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1580 			return ret;
1581 		}
1582 	}
1583 
1584 	bo->virtual = bo_gem->gtt_virtual;
1585 
1586 	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1587 	    bo_gem->gtt_virtual);
1588 
1589 	return 0;
1590 }
1591 
1592 int
1593 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1594 {
1595 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1596 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1597 	struct drm_i915_gem_set_domain set_domain;
1598 	int ret;
1599 
1600 	pthread_mutex_lock(&bufmgr_gem->lock);
1601 
1602 	ret = map_gtt(bo);
1603 	if (ret) {
1604 		pthread_mutex_unlock(&bufmgr_gem->lock);
1605 		return ret;
1606 	}
1607 
1608 	/* Now move it to the GTT domain so that the GPU and CPU
1609 	 * caches are flushed and the GPU isn't actively using the
1610 	 * buffer.
1611 	 *
1612 	 * The pagefault handler does this domain change for us when
1613 	 * it has unbound the BO from the GTT, but it's up to us to
1614 	 * tell it when we're about to use things if we had done
1615 	 * rendering and it still happens to be bound to the GTT.
1616 	 */
1617 	memclear(set_domain);
1618 	set_domain.handle = bo_gem->gem_handle;
1619 	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1620 	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1621 	ret = drmIoctl(bufmgr_gem->fd,
1622 		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
1623 		       &set_domain);
1624 	if (ret != 0) {
1625 		DBG("%s:%d: Error setting domain %d: %s\n",
1626 		    __FILE__, __LINE__, bo_gem->gem_handle,
1627 		    strerror(errno));
1628 	}
1629 
1630 	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1631 	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1632 	pthread_mutex_unlock(&bufmgr_gem->lock);
1633 
1634 	return 0;
1635 }
1636 
1637 /**
1638  * Performs a mapping of the buffer object like the normal GTT
1639  * mapping, but avoids waiting for the GPU to be done reading from or
1640  * rendering to the buffer.
1641  *
1642  * This is used in the implementation of GL_ARB_map_buffer_range: The
1643  * user asks to create a buffer, then does a mapping, fills some
1644  * space, runs a drawing command, then asks to map it again without
1645  * synchronizing because it guarantees that it won't write over the
1646  * data that the GPU is busy using (or, more specifically, that if it
1647  * does write over the data, it acknowledges that rendering is
1648  * undefined).
1649  */
1650 
1651 int
1652 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1653 {
1654 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1655 #ifdef HAVE_VALGRIND
1656 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1657 #endif
1658 	int ret;
1659 
1660 	/* If the CPU cache isn't coherent with the GTT, then use a
1661 	 * regular synchronized mapping.  The problem is that we don't
1662 	 * track where the buffer was last used on the CPU side in
1663 	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1664 	 * we would potentially corrupt the buffer even when the user
1665 	 * does reasonable things.
1666 	 */
1667 	if (!bufmgr_gem->has_llc)
1668 		return drm_intel_gem_bo_map_gtt(bo);
1669 
1670 	pthread_mutex_lock(&bufmgr_gem->lock);
1671 
1672 	ret = map_gtt(bo);
1673 	if (ret == 0) {
1674 		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1675 		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1676 	}
1677 
1678 	pthread_mutex_unlock(&bufmgr_gem->lock);
1679 
1680 	return ret;
1681 }
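
/*
 * Usage sketch (editorial): the GL_ARB_map_buffer_range pattern described
 * above, appending new data without stalling on the GPU's use of earlier
 * contents.  'write_offset', 'data' and 'len' are illustrative and must not
 * overlap anything still in flight.
 *
 *	if (drm_intel_gem_bo_map_unsynchronized(bo) == 0) {
 *		memcpy((char *)bo->virtual + write_offset, data, len);
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 */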
1682 
1683 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1684 {
1685 	drm_intel_bufmgr_gem *bufmgr_gem;
1686 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1687 	int ret = 0;
1688 
1689 	if (bo == NULL)
1690 		return 0;
1691 
1692 	if (bo_gem->is_userptr)
1693 		return 0;
1694 
1695 	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1696 
1697 	pthread_mutex_lock(&bufmgr_gem->lock);
1698 
1699 	if (bo_gem->map_count <= 0) {
1700 		DBG("attempted to unmap an unmapped bo\n");
1701 		pthread_mutex_unlock(&bufmgr_gem->lock);
1702 		/* Preserve the old behaviour of just treating this as a
1703 		 * no-op rather than reporting the error.
1704 		 */
1705 		return 0;
1706 	}
1707 
1708 	if (bo_gem->mapped_cpu_write) {
1709 		struct drm_i915_gem_sw_finish sw_finish;
1710 
1711 		/* Cause a flush to happen if the buffer's pinned for
1712 		 * scanout, so the results show up in a timely manner.
1713 		 * Unlike GTT set domains, this only does work if the
1714 		 * buffer should be scanout-related.
1715 		 */
1716 		memclear(sw_finish);
1717 		sw_finish.handle = bo_gem->gem_handle;
1718 		ret = drmIoctl(bufmgr_gem->fd,
1719 			       DRM_IOCTL_I915_GEM_SW_FINISH,
1720 			       &sw_finish);
1721 		ret = ret == -1 ? -errno : 0;
1722 
1723 		bo_gem->mapped_cpu_write = false;
1724 	}
1725 
1726 	/* We need to unmap after every invocation as we cannot track
1727 	 * an open vma for every bo as that will exhaust the system
1728 	 * limits and cause later failures.
1729 	 */
1730 	if (--bo_gem->map_count == 0) {
1731 		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1732 		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1733 		bo->virtual = NULL;
1734 	}
1735 	pthread_mutex_unlock(&bufmgr_gem->lock);
1736 
1737 	return ret;
1738 }
1739 
1740 int
1741 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1742 {
1743 	return drm_intel_gem_bo_unmap(bo);
1744 }
1745 
1746 static int
1747 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1748 			 unsigned long size, const void *data)
1749 {
1750 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1751 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1752 	struct drm_i915_gem_pwrite pwrite;
1753 	int ret;
1754 
1755 	if (bo_gem->is_userptr)
1756 		return -EINVAL;
1757 
1758 	memclear(pwrite);
1759 	pwrite.handle = bo_gem->gem_handle;
1760 	pwrite.offset = offset;
1761 	pwrite.size = size;
1762 	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1763 	ret = drmIoctl(bufmgr_gem->fd,
1764 		       DRM_IOCTL_I915_GEM_PWRITE,
1765 		       &pwrite);
1766 	if (ret != 0) {
1767 		ret = -errno;
1768 		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1769 		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1770 		    (int)size, strerror(errno));
1771 	}
1772 
1773 	return ret;
1774 }
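
/*
 * Usage sketch (illustrative only; "constants" is made-up caller data and
 * fill_constants() is a hypothetical helper).  The public entry point is
 * drm_intel_bo_subdata(), which dispatches to this pwrite-based path and
 * needs no mapping on the caller's side:
 *
 *	float constants[16];
 *
 *	fill_constants(constants);
 *	drm_intel_bo_subdata(bo, 0, sizeof(constants), constants);
 */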
1775 
1776 static int
1777 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1778 {
1779 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1780 	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1781 	int ret;
1782 
1783 	memclear(get_pipe_from_crtc_id);
1784 	get_pipe_from_crtc_id.crtc_id = crtc_id;
1785 	ret = drmIoctl(bufmgr_gem->fd,
1786 		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1787 		       &get_pipe_from_crtc_id);
1788 	if (ret != 0) {
1789 		/* We return -1 here to signal that we don't
1790 		 * know which pipe is associated with this crtc.
1791 		 * This lets the caller know that this information
1792 		 * isn't available; using the wrong pipe for
1793 		 * vblank waiting can cause the chipset to lock up.
1794 		 */
1795 		return -1;
1796 	}
1797 
1798 	return get_pipe_from_crtc_id.pipe;
1799 }
1800 
1801 static int
1802 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1803 			     unsigned long size, void *data)
1804 {
1805 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1806 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1807 	struct drm_i915_gem_pread pread;
1808 	int ret;
1809 
1810 	if (bo_gem->is_userptr)
1811 		return -EINVAL;
1812 
1813 	memclear(pread);
1814 	pread.handle = bo_gem->gem_handle;
1815 	pread.offset = offset;
1816 	pread.size = size;
1817 	pread.data_ptr = (uint64_t) (uintptr_t) data;
1818 	ret = drmIoctl(bufmgr_gem->fd,
1819 		       DRM_IOCTL_I915_GEM_PREAD,
1820 		       &pread);
1821 	if (ret != 0) {
1822 		ret = -errno;
1823 		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1824 		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1825 		    (int)size, strerror(errno));
1826 	}
1827 
1828 	return ret;
1829 }
1830 
1831 /** Waits for all GPU rendering with the object to have completed. */
1832 static void
1833 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1834 {
1835 	drm_intel_gem_bo_start_gtt_access(bo, 1);
1836 }
1837 
1838 /**
1839  * Waits on a BO for the given amount of time.
1840  *
1841  * @bo: buffer object to wait for
1842  * @timeout_ns: amount of time to wait in nanoseconds.
1843  *   If value is less than 0, an infinite wait will occur.
1844  *
1845  * Returns 0 if the wait was successful, i.e. the last batch referencing the
1846  * object has completed within the allotted time. Otherwise a negative return
1847  * value describes the error; of particular interest is -ETIME, returned when
1848  * the wait expired before the object became idle.
1849  *
1850  * Similar to drm_intel_gem_bo_wait_rendering, except that a timeout parameter
1851  * allows the operation to give up after a certain amount of time. Another
1852  * subtle difference is the internal locking semantics: this variant does not
1853  * hold the lock for the duration of the wait, which makes the wait subject
1854  * to a larger userspace race window.
1855  *
1856  * The implementation shall wait until the object is no longer actively
1857  * referenced within a batch buffer at the time of the call. The wait does
1858  * not guard against the buffer being re-issued by another thread or via a
1859  * flinked handle. Userspace must make sure this race does not occur if such
1860  * precision is important.
1861  *
1862  * Note that some kernels have broken the promise of an infinite wait for
1863  * negative values; upgrade to the latest stable kernel if this is the case.
1864  */
1865 int
1866 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1867 {
1868 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1869 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1870 	struct drm_i915_gem_wait wait;
1871 	int ret;
1872 
1873 	if (!bufmgr_gem->has_wait_timeout) {
1874 		DBG("%s:%d: Timed wait is not supported. Falling back to "
1875 		    "infinite wait\n", __FILE__, __LINE__);
1876 		if (timeout_ns) {
1877 			drm_intel_gem_bo_wait_rendering(bo);
1878 			return 0;
1879 		} else {
1880 			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1881 		}
1882 	}
1883 
1884 	memclear(wait);
1885 	wait.bo_handle = bo_gem->gem_handle;
1886 	wait.timeout_ns = timeout_ns;
1887 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1888 	if (ret == -1)
1889 		return -errno;
1890 
1891 	return ret;
1892 }
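
/*
 * Usage sketch (illustrative only; the 500 ms timeout is an arbitrary
 * example value).  A bounded wait lets the caller do other work instead of
 * blocking forever on a busy buffer:
 *
 *	int ret = drm_intel_gem_bo_wait(bo, 500 * 1000 * 1000LL);
 *
 *	if (ret == -ETIME)
 *		defer the readback, the buffer is still busy
 *	else if (ret == 0)
 *		the last batch referencing bo has completed
 */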
1893 
1894 /**
1895  * Sets the object to the GTT read and possibly write domain, used by the X
1896  * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1897  *
1898  * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1899  * can do tiled pixmaps this way.
1900  */
1901 void
1902 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1903 {
1904 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1905 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1906 	struct drm_i915_gem_set_domain set_domain;
1907 	int ret;
1908 
1909 	memclear(set_domain);
1910 	set_domain.handle = bo_gem->gem_handle;
1911 	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1912 	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1913 	ret = drmIoctl(bufmgr_gem->fd,
1914 		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
1915 		       &set_domain);
1916 	if (ret != 0) {
1917 		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1918 		    __FILE__, __LINE__, bo_gem->gem_handle,
1919 		    set_domain.read_domains, set_domain.write_domain,
1920 		    strerror(errno));
1921 	}
1922 }
1923 
1924 static void
1925 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1926 {
1927 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1928 	struct drm_gem_close close_bo;
1929 	int i, ret;
1930 
1931 	free(bufmgr_gem->exec2_objects);
1932 	free(bufmgr_gem->exec_objects);
1933 	free(bufmgr_gem->exec_bos);
1934 
1935 	pthread_mutex_destroy(&bufmgr_gem->lock);
1936 
1937 	/* Free any cached buffer objects we were going to reuse */
1938 	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1939 		struct drm_intel_gem_bo_bucket *bucket =
1940 		    &bufmgr_gem->cache_bucket[i];
1941 		drm_intel_bo_gem *bo_gem;
1942 
1943 		while (!DRMLISTEMPTY(&bucket->head)) {
1944 			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1945 					      bucket->head.next, head);
1946 			DRMLISTDEL(&bo_gem->head);
1947 
1948 			drm_intel_gem_bo_free(&bo_gem->bo);
1949 		}
1950 	}
1951 
1952 	/* Release userptr bo kept hanging around for optimisation. */
1953 	if (bufmgr_gem->userptr_active.ptr) {
1954 		memclear(close_bo);
1955 		close_bo.handle = bufmgr_gem->userptr_active.handle;
1956 		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1957 		free(bufmgr_gem->userptr_active.ptr);
1958 		if (ret)
1959 			fprintf(stderr,
1960 				"Failed to release test userptr object! (%d) "
1961 				"i915 kernel driver may not be sane!\n", errno);
1962 	}
1963 
1964 	free(bufmgr);
1965 }
1966 
1967 /**
1968  * Adds the target buffer to the validation list and adds the relocation
1969  * to the reloc_buffer's relocation list.
1970  *
1971  * The relocation entry at the given offset must already contain the
1972  * precomputed relocation value, because the kernel will optimize out
1973  * the relocation entry write when the buffer hasn't moved from the
1974  * last known offset in target_bo.
1975  */
1976 static int
1977 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1978 		 drm_intel_bo *target_bo, uint32_t target_offset,
1979 		 uint32_t read_domains, uint32_t write_domain,
1980 		 bool need_fence)
1981 {
1982 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1983 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1984 	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1985 	bool fenced_command;
1986 
1987 	if (bo_gem->has_error)
1988 		return -ENOMEM;
1989 
1990 	if (target_bo_gem->has_error) {
1991 		bo_gem->has_error = true;
1992 		return -ENOMEM;
1993 	}
1994 
1995 	/* We never use HW fences for rendering on 965+ */
1996 	if (bufmgr_gem->gen >= 4)
1997 		need_fence = false;
1998 
1999 	fenced_command = need_fence;
2000 	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
2001 		need_fence = false;
2002 
2003 	/* Create a new relocation list if needed */
2004 	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
2005 		return -ENOMEM;
2006 
2007 	/* Check overflow */
2008 	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
2009 
2010 	/* Check args */
2011 	assert(offset <= bo->size - 4);
2012 	assert((write_domain & (write_domain - 1)) == 0);
2013 
2014 	/* An object needing a fence is a tiled buffer, so it won't have
2015 	 * relocs to other buffers.
2016 	 */
2017 	if (need_fence) {
2018 		assert(target_bo_gem->reloc_count == 0);
2019 		target_bo_gem->reloc_tree_fences = 1;
2020 	}
2021 
2022 	/* Make sure that we're not adding a reloc to something whose size has
2023 	 * already been accounted for.
2024 	 */
2025 	assert(!bo_gem->used_as_reloc_target);
2026 	if (target_bo_gem != bo_gem) {
2027 		target_bo_gem->used_as_reloc_target = true;
2028 		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
2029 		bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
2030 	}
2031 
2032 	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
2033 	if (target_bo != bo)
2034 		drm_intel_gem_bo_reference(target_bo);
2035 	if (fenced_command)
2036 		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
2037 			DRM_INTEL_RELOC_FENCE;
2038 	else
2039 		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
2040 
2041 	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2042 	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2043 	bo_gem->relocs[bo_gem->reloc_count].target_handle =
2044 	    target_bo_gem->gem_handle;
2045 	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2046 	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2047 	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
2048 	bo_gem->reloc_count++;
2049 
2050 	return 0;
2051 }
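
/*
 * Usage sketch (illustrative only; the offsets and domains are made-up
 * example values).  Callers reach this through the public
 * drm_intel_bo_emit_reloc(), e.g. to point a state entry in a batch at a
 * render target:
 *
 *	drm_intel_bo_emit_reloc(batch_bo, state_offset,
 *				target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */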
2052 
2053 static void
2054 drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2055 {
2056 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2057 	bo_gem->use_48b_address_range = enable;
2058 }
2059 
2060 static int
2061 drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2062 {
2063 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2064 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2065 	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2066 	if (bo_gem->has_error)
2067 		return -ENOMEM;
2068 
2069 	if (target_bo_gem->has_error) {
2070 		bo_gem->has_error = true;
2071 		return -ENOMEM;
2072 	}
2073 
2074 	if (!target_bo_gem->is_softpin)
2075 		return -EINVAL;
2076 	if (target_bo_gem == bo_gem)
2077 		return -EINVAL;
2078 
2079 	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2080 		int new_size = bo_gem->softpin_target_size * 2;
2081 		if (new_size == 0)
2082 			new_size = bufmgr_gem->max_relocs;
2083 
2084 		bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
2085 				sizeof(drm_intel_bo *));
2086 		if (!bo_gem->softpin_target)
2087 			return -ENOMEM;
2088 
2089 		bo_gem->softpin_target_size = new_size;
2090 	}
2091 	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2092 	drm_intel_gem_bo_reference(target_bo);
2093 	bo_gem->softpin_target_count++;
2094 
2095 	return 0;
2096 }
2097 
2098 static int
2099 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
2100 			    drm_intel_bo *target_bo, uint32_t target_offset,
2101 			    uint32_t read_domains, uint32_t write_domain)
2102 {
2103 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2104 	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
2105 
2106 	if (target_bo_gem->is_softpin)
2107 		return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2108 	else
2109 		return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2110 					read_domains, write_domain,
2111 					!bufmgr_gem->fenced_relocs);
2112 }
2113 
2114 static int
2115 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
2116 				  drm_intel_bo *target_bo,
2117 				  uint32_t target_offset,
2118 				  uint32_t read_domains, uint32_t write_domain)
2119 {
2120 	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2121 				read_domains, write_domain, true);
2122 }
2123 
2124 int
2125 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
2126 {
2127 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2128 
2129 	return bo_gem->reloc_count;
2130 }
2131 
2132 /**
2133  * Removes existing relocation entries in the BO after "start".
2134  *
2135  * This allows a user to avoid a two-step process for state setup with
2136  * counting up all the buffer objects and doing a
2137  * drm_intel_bufmgr_check_aperture_space() before emitting any of the
2138  * relocations for the state setup.  Instead, save the state of the
2139  * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
2140  * state, and then check if it still fits in the aperture.
2141  *
2142  * Any further drm_intel_bufmgr_check_aperture_space() queries
2143  * involving this buffer in the tree are undefined after this call.
2144  *
2145  * This also removes all softpinned targets being referenced by the BO.
2146  */
2147 void
2148 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
2149 {
2150 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2151 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2152 	int i;
2153 	struct timespec time;
2154 
2155 	clock_gettime(CLOCK_MONOTONIC, &time);
2156 
2157 	assert(bo_gem->reloc_count >= start);
2158 
2159 	/* Unreference the cleared target buffers */
2160 	pthread_mutex_lock(&bufmgr_gem->lock);
2161 
2162 	for (i = start; i < bo_gem->reloc_count; i++) {
2163 		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
2164 		if (&target_bo_gem->bo != bo) {
2165 			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
2166 			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
2167 								  time.tv_sec);
2168 		}
2169 	}
2170 	bo_gem->reloc_count = start;
2171 
2172 	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2173 		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
2174 		drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
2175 	}
2176 	bo_gem->softpin_target_count = 0;
2177 
2178 	pthread_mutex_unlock(&bufmgr_gem->lock);
2179 
2180 }
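
/*
 * Usage sketch (illustrative only; emit_state() and flush_batch() are
 * hypothetical caller helpers) of the save/emit/check pattern described
 * in the comment above:
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *
 *	emit_state(batch_bo);
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 *		flush_batch(batch_bo);
 *	}
 */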
2181 
2182 /**
2183  * Walk the tree of relocations rooted at BO and accumulate the list of
2184  * validations to be performed and update the relocation buffers with
2185  * index values into the validation list.
2186  */
2187 static void
2188 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
2189 {
2190 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2191 	int i;
2192 
2193 	if (bo_gem->relocs == NULL)
2194 		return;
2195 
2196 	for (i = 0; i < bo_gem->reloc_count; i++) {
2197 		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2198 
2199 		if (target_bo == bo)
2200 			continue;
2201 
2202 		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2203 
2204 		/* Continue walking the tree depth-first. */
2205 		drm_intel_gem_bo_process_reloc(target_bo);
2206 
2207 		/* Add the target to the validate list */
2208 		drm_intel_add_validate_buffer(target_bo);
2209 	}
2210 }
2211 
2212 static void
2213 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2214 {
2215 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2216 	int i;
2217 
2218 	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
2219 		return;
2220 
2221 	for (i = 0; i < bo_gem->reloc_count; i++) {
2222 		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2223 		int need_fence;
2224 
2225 		if (target_bo == bo)
2226 			continue;
2227 
2228 		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2229 
2230 		/* Continue walking the tree depth-first. */
2231 		drm_intel_gem_bo_process_reloc2(target_bo);
2232 
2233 		need_fence = (bo_gem->reloc_target_info[i].flags &
2234 			      DRM_INTEL_RELOC_FENCE);
2235 
2236 		/* Add the target to the validate list */
2237 		drm_intel_add_validate_buffer2(target_bo, need_fence);
2238 	}
2239 
2240 	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2241 		drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2242 
2243 		if (target_bo == bo)
2244 			continue;
2245 
2246 		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2247 		drm_intel_gem_bo_process_reloc2(target_bo);
2248 		drm_intel_add_validate_buffer2(target_bo, false);
2249 	}
2250 }
2251 
2252 
2253 static void
2254 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
2255 {
2256 	int i;
2257 
2258 	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2259 		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2260 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2261 
2262 		/* Update the buffer offset */
2263 		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2264 			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2265 			    bo_gem->gem_handle, bo_gem->name,
2266 			    upper_32_bits(bo->offset64),
2267 			    lower_32_bits(bo->offset64),
2268 			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2269 			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
2270 			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
2271 			bo->offset = bufmgr_gem->exec_objects[i].offset;
2272 		}
2273 	}
2274 }
2275 
2276 static void
2277 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2278 {
2279 	int i;
2280 
2281 	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2282 		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2283 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2284 
2285 		/* Update the buffer offset */
2286 		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2287 			/* If we see a softpinned object here, it means the kernel
2288 			 * has relocated our object, which indicates a programming error.
2289 			 */
2290 			assert(!bo_gem->is_softpin);
2291 			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2292 			    bo_gem->gem_handle, bo_gem->name,
2293 			    upper_32_bits(bo->offset64),
2294 			    lower_32_bits(bo->offset64),
2295 			    upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2296 			    lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
2297 			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
2298 			bo->offset = bufmgr_gem->exec2_objects[i].offset;
2299 		}
2300 	}
2301 }
2302 
2303 void
2304 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2305 			      int x1, int y1, int width, int height,
2306 			      enum aub_dump_bmp_format format,
2307 			      int pitch, int offset)
2308 {
2309 }
2310 
2311 static int
2312 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2313 		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2314 {
2315 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2316 	struct drm_i915_gem_execbuffer execbuf;
2317 	int ret, i;
2318 
2319 	if (to_bo_gem(bo)->has_error)
2320 		return -ENOMEM;
2321 
2322 	pthread_mutex_lock(&bufmgr_gem->lock);
2323 	/* Update indices and set up the validate list. */
2324 	drm_intel_gem_bo_process_reloc(bo);
2325 
2326 	/* Add the batch buffer to the validation list.  There are no
2327 	 * relocations pointing to it.
2328 	 */
2329 	drm_intel_add_validate_buffer(bo);
2330 
2331 	memclear(execbuf);
2332 	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2333 	execbuf.buffer_count = bufmgr_gem->exec_count;
2334 	execbuf.batch_start_offset = 0;
2335 	execbuf.batch_len = used;
2336 	execbuf.cliprects_ptr = (uintptr_t) cliprects;
2337 	execbuf.num_cliprects = num_cliprects;
2338 	execbuf.DR1 = 0;
2339 	execbuf.DR4 = DR4;
2340 
2341 	ret = drmIoctl(bufmgr_gem->fd,
2342 		       DRM_IOCTL_I915_GEM_EXECBUFFER,
2343 		       &execbuf);
2344 	if (ret != 0) {
2345 		ret = -errno;
2346 		if (errno == ENOSPC) {
2347 			DBG("Execbuffer fails to pin. "
2348 			    "Estimate: %u. Actual: %u. Available: %u\n",
2349 			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2350 							       bufmgr_gem->
2351 							       exec_count),
2352 			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2353 							      bufmgr_gem->
2354 							      exec_count),
2355 			    (unsigned int)bufmgr_gem->gtt_size);
2356 		}
2357 	}
2358 	drm_intel_update_buffer_offsets(bufmgr_gem);
2359 
2360 	if (bufmgr_gem->bufmgr.debug)
2361 		drm_intel_gem_dump_validation_list(bufmgr_gem);
2362 
2363 	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2364 		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2365 
2366 		bo_gem->idle = false;
2367 
2368 		/* Disconnect the buffer from the validate list */
2369 		bo_gem->validate_index = -1;
2370 		bufmgr_gem->exec_bos[i] = NULL;
2371 	}
2372 	bufmgr_gem->exec_count = 0;
2373 	pthread_mutex_unlock(&bufmgr_gem->lock);
2374 
2375 	return ret;
2376 }
2377 
2378 static int
2379 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2380 	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2381 	 int in_fence, int *out_fence,
2382 	 unsigned int flags)
2383 {
2384 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2385 	struct drm_i915_gem_execbuffer2 execbuf;
2386 	int ret = 0;
2387 	int i;
2388 
2389 	if (to_bo_gem(bo)->has_error)
2390 		return -ENOMEM;
2391 
2392 	switch (flags & 0x7) {
2393 	default:
2394 		return -EINVAL;
2395 	case I915_EXEC_BLT:
2396 		if (!bufmgr_gem->has_blt)
2397 			return -EINVAL;
2398 		break;
2399 	case I915_EXEC_BSD:
2400 		if (!bufmgr_gem->has_bsd)
2401 			return -EINVAL;
2402 		break;
2403 	case I915_EXEC_VEBOX:
2404 		if (!bufmgr_gem->has_vebox)
2405 			return -EINVAL;
2406 		break;
2407 	case I915_EXEC_RENDER:
2408 	case I915_EXEC_DEFAULT:
2409 		break;
2410 	}
2411 
2412 	pthread_mutex_lock(&bufmgr_gem->lock);
2413 	/* Update indices and set up the validate list. */
2414 	drm_intel_gem_bo_process_reloc2(bo);
2415 
2416 	/* Add the batch buffer to the validation list.  There are no relocations
2417 	 * pointing to it.
2418 	 */
2419 	drm_intel_add_validate_buffer2(bo, 0);
2420 
2421 	memclear(execbuf);
2422 	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2423 	execbuf.buffer_count = bufmgr_gem->exec_count;
2424 	execbuf.batch_start_offset = 0;
2425 	execbuf.batch_len = used;
2426 	execbuf.cliprects_ptr = (uintptr_t)cliprects;
2427 	execbuf.num_cliprects = num_cliprects;
2428 	execbuf.DR1 = 0;
2429 	execbuf.DR4 = DR4;
2430 	execbuf.flags = flags;
2431 	if (ctx == NULL)
2432 		i915_execbuffer2_set_context_id(execbuf, 0);
2433 	else
2434 		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2435 	execbuf.rsvd2 = 0;
2436 	if (in_fence != -1) {
2437 		execbuf.rsvd2 = in_fence;
2438 		execbuf.flags |= I915_EXEC_FENCE_IN;
2439 	}
2440 	if (out_fence != NULL) {
2441 		*out_fence = -1;
2442 		execbuf.flags |= I915_EXEC_FENCE_OUT;
2443 	}
2444 
2445 	if (bufmgr_gem->no_exec)
2446 		goto skip_execution;
2447 
2448 	ret = drmIoctl(bufmgr_gem->fd,
2449 		       DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
2450 		       &execbuf);
2451 	if (ret != 0) {
2452 		ret = -errno;
2453 		if (ret == -ENOSPC) {
2454 			DBG("Execbuffer fails to pin. "
2455 			    "Estimate: %u. Actual: %u. Available: %u\n",
2456 			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2457 							       bufmgr_gem->exec_count),
2458 			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2459 							      bufmgr_gem->exec_count),
2460 			    (unsigned int) bufmgr_gem->gtt_size);
2461 		}
2462 	}
2463 	drm_intel_update_buffer_offsets2(bufmgr_gem);
2464 
2465 	if (ret == 0 && out_fence != NULL)
2466 		*out_fence = execbuf.rsvd2 >> 32;
2467 
2468 skip_execution:
2469 	if (bufmgr_gem->bufmgr.debug)
2470 		drm_intel_gem_dump_validation_list(bufmgr_gem);
2471 
2472 	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2473 		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2474 
2475 		bo_gem->idle = false;
2476 
2477 		/* Disconnect the buffer from the validate list */
2478 		bo_gem->validate_index = -1;
2479 		bufmgr_gem->exec_bos[i] = NULL;
2480 	}
2481 	bufmgr_gem->exec_count = 0;
2482 	pthread_mutex_unlock(&bufmgr_gem->lock);
2483 
2484 	return ret;
2485 }
2486 
2487 static int
2488 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2489 		       drm_clip_rect_t *cliprects, int num_cliprects,
2490 		       int DR4)
2491 {
2492 	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2493 			-1, NULL, I915_EXEC_RENDER);
2494 }
2495 
2496 static int
2497 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2498 			drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2499 			unsigned int flags)
2500 {
2501 	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2502 			-1, NULL, flags);
2503 }
2504 
2505 int
2506 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2507 			      int used, unsigned int flags)
2508 {
2509 	return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
2510 }
2511 
2512 int
2513 drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
2514 			    drm_intel_context *ctx,
2515 			    int used,
2516 			    int in_fence,
2517 			    int *out_fence,
2518 			    unsigned int flags)
2519 {
2520 	return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
2521 }
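
/*
 * Usage sketch (illustrative only; "used" is the byte length of the batch
 * the caller has built and in_fence_fd is a sync file it already owns).
 * Executing with explicit fences threads a sync file in and returns a new
 * one for the completion of this batch:
 *
 *	int out_fence = -1;
 *
 *	ret = drm_intel_gem_bo_fence_exec(batch_bo, ctx, used,
 *					  in_fence_fd, &out_fence,
 *					  I915_EXEC_RENDER);
 *	if (ret == 0 && out_fence != -1) {
 *		hand out_fence to the consumer, then close(out_fence);
 *	}
 */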
2522 
2523 static int
2524 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2525 {
2526 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2527 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2528 	struct drm_i915_gem_pin pin;
2529 	int ret;
2530 
2531 	memclear(pin);
2532 	pin.handle = bo_gem->gem_handle;
2533 	pin.alignment = alignment;
2534 
2535 	ret = drmIoctl(bufmgr_gem->fd,
2536 		       DRM_IOCTL_I915_GEM_PIN,
2537 		       &pin);
2538 	if (ret != 0)
2539 		return -errno;
2540 
2541 	bo->offset64 = pin.offset;
2542 	bo->offset = pin.offset;
2543 	return 0;
2544 }
2545 
2546 static int
2547 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2548 {
2549 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2550 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2551 	struct drm_i915_gem_unpin unpin;
2552 	int ret;
2553 
2554 	memclear(unpin);
2555 	unpin.handle = bo_gem->gem_handle;
2556 
2557 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2558 	if (ret != 0)
2559 		return -errno;
2560 
2561 	return 0;
2562 }
2563 
2564 static int
2565 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2566 				     uint32_t tiling_mode,
2567 				     uint32_t stride)
2568 {
2569 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2570 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2571 	struct drm_i915_gem_set_tiling set_tiling;
2572 	int ret;
2573 
2574 	if (bo_gem->global_name == 0 &&
2575 	    tiling_mode == bo_gem->tiling_mode &&
2576 	    stride == bo_gem->stride)
2577 		return 0;
2578 
2579 	memset(&set_tiling, 0, sizeof(set_tiling));
2580 	do {
2581 		/* set_tiling is slightly broken and overwrites the
2582 		 * input on the error path, so we have to open code
2583 		 * drmIoctl.
2584 		 */
2585 		set_tiling.handle = bo_gem->gem_handle;
2586 		set_tiling.tiling_mode = tiling_mode;
2587 		set_tiling.stride = stride;
2588 
2589 		ret = ioctl(bufmgr_gem->fd,
2590 			    DRM_IOCTL_I915_GEM_SET_TILING,
2591 			    &set_tiling);
2592 	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2593 	if (ret == -1)
2594 		return -errno;
2595 
2596 	bo_gem->tiling_mode = set_tiling.tiling_mode;
2597 	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2598 	bo_gem->stride = set_tiling.stride;
2599 	return 0;
2600 }
2601 
2602 static int
2603 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2604 			    uint32_t stride)
2605 {
2606 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2607 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2608 	int ret;
2609 
2610 	/* Tiling with userptr surfaces is not supported
2611 	 * on all hardware, so refuse it for the time being.
2612 	 */
2613 	if (bo_gem->is_userptr)
2614 		return -EINVAL;
2615 
2616 	/* Linear buffers have no stride. By ensuring that we only ever use
2617 	 * stride 0 with linear buffers, we simplify our code.
2618 	 */
2619 	if (*tiling_mode == I915_TILING_NONE)
2620 		stride = 0;
2621 
2622 	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2623 	if (ret == 0)
2624 		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2625 
2626 	*tiling_mode = bo_gem->tiling_mode;
2627 	return ret;
2628 }
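
/*
 * Usage sketch (illustrative only; the 4096-byte stride is a made-up
 * example).  The kernel may refuse or adjust the requested mode, so the
 * caller must re-check *tiling_mode afterwards:
 *
 *	uint32_t tiling = I915_TILING_X;
 *
 *	drm_intel_bo_set_tiling(bo, &tiling, 4096);
 *	if (tiling != I915_TILING_X)
 *		fall back to a linear layout
 */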
2629 
2630 static int
2631 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2632 			    uint32_t * swizzle_mode)
2633 {
2634 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2635 
2636 	*tiling_mode = bo_gem->tiling_mode;
2637 	*swizzle_mode = bo_gem->swizzle_mode;
2638 	return 0;
2639 }
2640 
2641 static int
2642 drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2643 {
2644 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2645 
2646 	bo_gem->is_softpin = true;
2647 	bo->offset64 = offset;
2648 	bo->offset = offset;
2649 	return 0;
2650 }
2651 
2652 drm_intel_bo *
2653 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2654 {
2655 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2656 	int ret;
2657 	uint32_t handle;
2658 	drm_intel_bo_gem *bo_gem;
2659 	struct drm_i915_gem_get_tiling get_tiling;
2660 
2661 	pthread_mutex_lock(&bufmgr_gem->lock);
2662 	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2663 	if (ret) {
2664 		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2665 		pthread_mutex_unlock(&bufmgr_gem->lock);
2666 		return NULL;
2667 	}
2668 
2669 	/*
2670 	 * See if the kernel has already returned this buffer to us. Just as
2671 	 * for named buffers, we must not create two bos pointing at the same
2672 	 * kernel object.
2673 	 */
2674 	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
2675 		  &handle, sizeof(handle), bo_gem);
2676 	if (bo_gem) {
2677 		drm_intel_gem_bo_reference(&bo_gem->bo);
2678 		goto out;
2679 	}
2680 
2681 	bo_gem = calloc(1, sizeof(*bo_gem));
2682 	if (!bo_gem)
2683 		goto out;
2684 
2685 	atomic_set(&bo_gem->refcount, 1);
2686 	DRMINITLISTHEAD(&bo_gem->vma_list);
2687 
2688 	/* Determine size of bo.  The fd-to-handle ioctl really should
2689 	 * return the size, but it doesn't.  If we have kernel 3.12 or
2690 	 * later, we can lseek on the prime fd to get the size.  Older
2691 	 * kernels will just fail, in which case we fall back to the
2692 	 * provided (estimated or guessed) size. */
2693 	ret = lseek(prime_fd, 0, SEEK_END);
2694 	if (ret != -1)
2695 		bo_gem->bo.size = ret;
2696 	else
2697 		bo_gem->bo.size = size;
2698 
2699 	bo_gem->bo.handle = handle;
2700 	bo_gem->bo.bufmgr = bufmgr;
2701 
2702 	bo_gem->gem_handle = handle;
2703 	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
2704 		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
2705 
2706 	bo_gem->name = "prime";
2707 	bo_gem->validate_index = -1;
2708 	bo_gem->reloc_tree_fences = 0;
2709 	bo_gem->used_as_reloc_target = false;
2710 	bo_gem->has_error = false;
2711 	bo_gem->reusable = false;
2712 	bo_gem->use_48b_address_range = false;
2713 
2714 	memclear(get_tiling);
2715 	get_tiling.handle = bo_gem->gem_handle;
2716 	if (drmIoctl(bufmgr_gem->fd,
2717 		     DRM_IOCTL_I915_GEM_GET_TILING,
2718 		     &get_tiling))
2719 		goto err;
2720 
2721 	bo_gem->tiling_mode = get_tiling.tiling_mode;
2722 	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2723 	/* XXX stride is unknown */
2724 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2725 
2726 out:
2727 	pthread_mutex_unlock(&bufmgr_gem->lock);
2728 	return &bo_gem->bo;
2729 
2730 err:
2731 	drm_intel_gem_bo_free(&bo_gem->bo);
2732 	pthread_mutex_unlock(&bufmgr_gem->lock);
2733 	return NULL;
2734 }
2735 
2736 int
2737 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2738 {
2739 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2740 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2741 
2742 	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2743 			       DRM_CLOEXEC, prime_fd) != 0)
2744 		return -errno;
2745 
2746 	bo_gem->reusable = false;
2747 
2748 	return 0;
2749 }
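
/*
 * Usage sketch (illustrative only) for sharing a buffer across devices or
 * processes via PRIME: export on one side, import on the other, and close
 * the fd once both sides hold a reference:
 *
 *	int fd = -1;
 *
 *	if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *		drm_intel_bo *imported =
 *			drm_intel_bo_gem_create_from_prime(other_bufmgr,
 *							   fd, bo->size);
 *		close(fd);
 *	}
 */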
2750 
2751 static int
2752 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2753 {
2754 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2755 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2756 
2757 	if (!bo_gem->global_name) {
2758 		struct drm_gem_flink flink;
2759 
2760 		memclear(flink);
2761 		flink.handle = bo_gem->gem_handle;
2762 		if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
2763 			return -errno;
2764 
2765 		pthread_mutex_lock(&bufmgr_gem->lock);
2766 		if (!bo_gem->global_name) {
2767 			bo_gem->global_name = flink.name;
2768 			bo_gem->reusable = false;
2769 
2770 			HASH_ADD(name_hh, bufmgr_gem->name_table,
2771 				 global_name, sizeof(bo_gem->global_name),
2772 				 bo_gem);
2773 		}
2774 		pthread_mutex_unlock(&bufmgr_gem->lock);
2775 	}
2776 
2777 	*name = bo_gem->global_name;
2778 	return 0;
2779 }
2780 
2781 /**
2782  * Enables unlimited caching of buffer objects for reuse.
2783  *
2784  * This is potentially very memory expensive, as the cache at each bucket
2785  * size is only bounded by how many buffers of that size we've managed to have
2786  * in flight at once.
2787  */
2788 void
2789 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2790 {
2791 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2792 
2793 	bufmgr_gem->bo_reuse = true;
2794 }
2795 
2796 /**
2797  * Disables implicit synchronisation before executing the bo
2798  *
2799  * This will cause rendering corruption unless you correctly manage explicit
2800  * fences for all rendering involving this buffer - including use by others.
2801  * Disabling the implicit serialisation is only required if that serialisation
2802  * is too coarse (for example, you have split the buffer into many
2803  * non-overlapping regions and are sharing the whole buffer between concurrent
2804  * independent command streams).
2805  *
2806  * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
2807  * which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync,
2808  * or subsequent execbufs involving the bo will generate EINVAL.
2809  */
2810 void
2811 drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
2812 {
2813 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2814 
2815 	bo_gem->kflags |= EXEC_OBJECT_ASYNC;
2816 }
2817 
2818 /**
2819  * Enables implicit synchronisation before executing the bo
2820  *
2821  * This is the default behaviour of the kernel, to wait upon prior writes
2822  * completing on the object before rendering with it, or to wait for prior
2823  * reads to complete before writing into the object.
2824  * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
2825  * the kernel never to insert a stall before using the object. Then this
2826  * function can be used to restore the implicit sync before subsequent
2827  * rendering.
2828  */
2829 void
2830 drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
2831 {
2832 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2833 
2834 	bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
2835 }
2836 
2837 /**
2838  * Query whether the kernel supports disabling of its implicit synchronisation
2839  * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
2840  */
2841 int
2842 drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
2843 {
2844 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2845 
2846 	return bufmgr_gem->has_exec_async;
2847 }
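
/*
 * Usage sketch (illustrative only): probe for kernel support before opting
 * a shared buffer out of implicit synchronisation, as the comments above
 * require:
 *
 *	if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
 *		drm_intel_gem_bo_disable_implicit_sync(shared_bo);
 */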
2848 
2849 /**
2850  * Enable use of fenced reloc type.
2851  *
2852  * New code should enable this to avoid unnecessary fence register
2853  * allocation.  If this option is not enabled, all relocs will have a fence
2854  * register allocated.
2855  */
2856 void
2857 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2858 {
2859 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2860 
2861 	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2862 		bufmgr_gem->fenced_relocs = true;
2863 }
2864 
2865 /**
2866  * Return the additional aperture space required by the tree of buffer objects
2867  * rooted at bo.
2868  */
2869 static int
2870 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2871 {
2872 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2873 	int i;
2874 	int total = 0;
2875 
2876 	if (bo == NULL || bo_gem->included_in_check_aperture)
2877 		return 0;
2878 
2879 	total += bo->size;
2880 	bo_gem->included_in_check_aperture = true;
2881 
2882 	for (i = 0; i < bo_gem->reloc_count; i++)
2883 		total +=
2884 		    drm_intel_gem_bo_get_aperture_space(bo_gem->
2885 							reloc_target_info[i].bo);
2886 
2887 	return total;
2888 }
2889 
2890 /**
2891  * Count the number of buffers in this list that need a fence reg
2892  *
2893  * If the count is greater than the number of available regs, we'll have
2894  * to ask the caller to resubmit a batch with fewer tiled buffers.
2895  *
2896  * This function over-counts if the same buffer is used multiple times.
2897  */
2898 static unsigned int
2899 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2900 {
2901 	int i;
2902 	unsigned int total = 0;
2903 
2904 	for (i = 0; i < count; i++) {
2905 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2906 
2907 		if (bo_gem == NULL)
2908 			continue;
2909 
2910 		total += bo_gem->reloc_tree_fences;
2911 	}
2912 	return total;
2913 }
2914 
2915 /**
2916  * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2917  * for the next drm_intel_bufmgr_check_aperture_space() call.
2918  */
2919 static void
2920 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2921 {
2922 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2923 	int i;
2924 
2925 	if (bo == NULL || !bo_gem->included_in_check_aperture)
2926 		return;
2927 
2928 	bo_gem->included_in_check_aperture = false;
2929 
2930 	for (i = 0; i < bo_gem->reloc_count; i++)
2931 		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2932 							   reloc_target_info[i].bo);
2933 }
2934 
2935 /**
2936  * Return a conservative estimate for the amount of aperture required
2937  * for a collection of buffers. This may double-count some buffers.
2938  */
2939 static unsigned int
2940 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2941 {
2942 	int i;
2943 	unsigned int total = 0;
2944 
2945 	for (i = 0; i < count; i++) {
2946 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2947 		if (bo_gem != NULL)
2948 			total += bo_gem->reloc_tree_size;
2949 	}
2950 	return total;
2951 }
2952 
2953 /**
2954  * Return the amount of aperture needed for a collection of buffers.
2955  * This avoids double counting any buffers, at the cost of looking
2956  * at every buffer in the set.
2957  */
2958 static unsigned int
2959 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2960 {
2961 	int i;
2962 	unsigned int total = 0;
2963 
2964 	for (i = 0; i < count; i++) {
2965 		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2966 		/* For the first buffer object in the array, we get an
2967 		 * accurate count back for its reloc_tree size (since nothing
2968 		 * had been flagged as being counted yet).  We can save that
2969 		 * value out as a more conservative reloc_tree_size that
2970 		 * avoids double-counting target buffers.  Since the first
2971 		 * buffer happens to usually be the batch buffer in our
2972 		 * callers, this can pull us back from doing the tree
2973 		 * walk on every new batch emit.
2974 		 */
2975 		if (i == 0) {
2976 			drm_intel_bo_gem *bo_gem =
2977 			    (drm_intel_bo_gem *) bo_array[i];
2978 			bo_gem->reloc_tree_size = total;
2979 		}
2980 	}
2981 
2982 	for (i = 0; i < count; i++)
2983 		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2984 	return total;
2985 }
2986 
2987 /**
2988  * Return -1 if the batchbuffer should be flushed before attempting to
2989  * emit rendering referencing the buffers pointed to by bo_array.
2990  *
2991  * This is required because if we try to emit a batchbuffer with relocations
2992  * to a tree of buffers that won't simultaneously fit in the aperture,
2993  * the rendering will return an error at a point where the software is not
2994  * prepared to recover from it.
2995  *
2996  * However, we also want to emit the batchbuffer significantly before we reach
2997  * the limit, as a series of batchbuffers each of which references buffers
2998  * covering almost all of the aperture means that at each emit we end up
2999  * waiting to evict a buffer from the last rendering, and we get synchronous
3000  * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
3001  * get better parallelism.
3002  */
3003 static int
3004 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
3005 {
3006 	drm_intel_bufmgr_gem *bufmgr_gem =
3007 	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
3008 	unsigned int total = 0;
3009 	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
3010 	int total_fences;
3011 
3012 	/* Check for fence reg constraints if necessary */
3013 	if (bufmgr_gem->available_fences) {
3014 		total_fences = drm_intel_gem_total_fences(bo_array, count);
3015 		if (total_fences > bufmgr_gem->available_fences)
3016 			return -ENOSPC;
3017 	}
3018 
3019 	total = drm_intel_gem_estimate_batch_space(bo_array, count);
3020 
3021 	if (total > threshold)
3022 		total = drm_intel_gem_compute_batch_space(bo_array, count);
3023 
3024 	if (total > threshold) {
3025 		DBG("check_space: overflowed available aperture, "
3026 		    "%dkb vs %dkb\n",
3027 		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
3028 		return -ENOSPC;
3029 	} else {
3030 		DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
3031 		    (int)bufmgr_gem->gtt_size / 1024);
3032 		return 0;
3033 	}
3034 }
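
/*
 * Usage sketch (illustrative only; "render_bos" is a hypothetical array of
 * the buffers the next batch will reference, batch buffer first):
 *
 *	if (drm_intel_bufmgr_check_aperture_space(render_bos, n_bos) != 0) {
 *		flush the current batch, then rebuild the list and retry
 *	}
 */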
3035 
3036 /*
3037  * Disable buffer reuse for objects which are shared with the kernel
3038  * as scanout buffers
3039  */
3040 static int
3041 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
3042 {
3043 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3044 
3045 	bo_gem->reusable = false;
3046 	return 0;
3047 }
3048 
3049 static int
3050 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
3051 {
3052 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3053 
3054 	return bo_gem->reusable;
3055 }
3056 
3057 static int
3058 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3059 {
3060 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3061 	int i;
3062 
3063 	for (i = 0; i < bo_gem->reloc_count; i++) {
3064 		if (bo_gem->reloc_target_info[i].bo == target_bo)
3065 			return 1;
3066 		if (bo == bo_gem->reloc_target_info[i].bo)
3067 			continue;
3068 		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
3069 						target_bo))
3070 			return 1;
3071 	}
3072 
3073 	for (i = 0; i < bo_gem->softpin_target_count; i++) {
3074 		if (bo_gem->softpin_target[i] == target_bo)
3075 			return 1;
3076 		if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
3077 			return 1;
3078 	}
3079 
3080 	return 0;
3081 }
3082 
3083 /** Return true if target_bo is referenced by bo's relocation tree. */
3084 static int
3085 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3086 {
3087 	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
3088 
3089 	if (bo == NULL || target_bo == NULL)
3090 		return 0;
3091 	if (target_bo_gem->used_as_reloc_target)
3092 		return _drm_intel_gem_bo_references(bo, target_bo);
3093 	return 0;
3094 }
3095 
3096 static void
3097 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3098 {
3099 	unsigned int i = bufmgr_gem->num_buckets;
3100 
3101 	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3102 
3103 	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3104 	bufmgr_gem->cache_bucket[i].size = size;
3105 	bufmgr_gem->num_buckets++;
3106 }
3107 
3108 static void
3109 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3110 {
3111 	unsigned long size, cache_max_size = 64 * 1024 * 1024;
3112 
3113 	/* OK, so power of two buckets was too wasteful of memory.
3114 	 * Give 3 other sizes between each power of two, to hopefully
3115 	 * cover things accurately enough.  (The alternative is
3116 	 * probably to just go for exact matching of sizes, and assume
3117 	 * that for things like composited window resize the tiled
3118 	 * width/height alignment and rounding of sizes to pages will
3119 	 * get us useful cache hit rates anyway)
3120 	 */
3121 	add_bucket(bufmgr_gem, 4096);
3122 	add_bucket(bufmgr_gem, 4096 * 2);
3123 	add_bucket(bufmgr_gem, 4096 * 3);
3124 
3125 	/* Initialize the linked lists for BO reuse cache. */
3126 	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3127 		add_bucket(bufmgr_gem, size);
3128 
3129 		add_bucket(bufmgr_gem, size + size * 1 / 4);
3130 		add_bucket(bufmgr_gem, size + size * 2 / 4);
3131 		add_bucket(bufmgr_gem, size + size * 3 / 4);
3132 	}
3133 }
3134 
3135 void
3136 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3137 {
3138 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3139 
3140 	bufmgr_gem->vma_max = limit;
3141 
3142 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3143 }
3144 
3145 static int
3146 parse_devid_override(const char *devid_override)
3147 {
3148 	static const struct {
3149 		const char *name;
3150 		int pci_id;
3151 	} name_map[] = {
3152 		{ "brw", PCI_CHIP_I965_GM },
3153 		{ "g4x", PCI_CHIP_GM45_GM },
3154 		{ "ilk", PCI_CHIP_ILD_G },
3155 		{ "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
3156 		{ "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
3157 		{ "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
3158 		{ "byt", PCI_CHIP_VALLEYVIEW_3 },
3159 		{ "bdw", 0x1620 | BDW_ULX },
3160 		{ "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
3161 		{ "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
3162 	};
3163 	unsigned int i;
3164 
3165 	for (i = 0; i < ARRAY_SIZE(name_map); i++) {
3166 		if (!strcmp(name_map[i].name, devid_override))
3167 			return name_map[i].pci_id;
3168 	}
3169 
3170 	return strtod(devid_override, NULL);
3171 }
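
/*
 * Example (illustrative only): the override is normally supplied through
 * the INTEL_DEVID_OVERRIDE environment variable rather than by calling
 * this helper directly, e.g.
 *
 *	INTEL_DEVID_OVERRIDE=skl ./app
 *	INTEL_DEVID_OVERRIDE=0x1912 ./app
 *
 * Either a name from the table above or a numeric PCI ID is accepted; note
 * that using the override also sets no_exec below, so batches are built
 * but never submitted.
 */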
3172 
3173 /**
3174  * Get the PCI ID for the device.  This can be overridden by setting the
3175  * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
3176  */
3177 static int
3178 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3179 {
3180 	char *devid_override;
3181 	int devid = 0;
3182 	int ret;
3183 	drm_i915_getparam_t gp;
3184 
3185 	if (geteuid() == getuid()) {
3186 		devid_override = getenv("INTEL_DEVID_OVERRIDE");
3187 		if (devid_override) {
3188 			bufmgr_gem->no_exec = true;
3189 			return parse_devid_override(devid_override);
3190 		}
3191 	}
3192 
3193 	memclear(gp);
3194 	gp.param = I915_PARAM_CHIPSET_ID;
3195 	gp.value = &devid;
3196 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3197 	if (ret) {
3198 		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
3199 		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
3200 	}
3201 	return devid;
3202 }
3203 
3204 int
3205 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
3206 {
3207 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3208 
3209 	return bufmgr_gem->pci_device;
3210 }
3211 
3212 /**
3213  * Sets the AUB filename.
3214  *
3215  * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
3216  * for it to have any effect.
3217  */
3218 void
3219 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
3220 				      const char *filename)
3221 {
3222 }
3223 
3224 /**
3225  * Sets up AUB dumping.
3226  *
3227  * This is a trace file format that can be used with the simulator.
3228  * Packets are emitted in a format somewhat like GPU command packets.
3229  * You can set up a GTT and upload your objects into the referenced
3230  * space, then send off batchbuffers and get BMPs out the other end.
3231  */
3232 void
3233 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
3234 {
3235 	fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
3236 		"Use intel_aubdump from intel-gpu-tools instead.  Install intel-gpu-tools,\n"
3237 		"then run (for example)\n\n"
3238 		"\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
3239 		"See the intel_aubdump man page for more details.\n");
3240 }
3241 
3242 drm_intel_context *
3243 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3244 {
3245 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3246 	struct drm_i915_gem_context_create create;
3247 	drm_intel_context *context = NULL;
3248 	int ret;
3249 
3250 	context = calloc(1, sizeof(*context));
3251 	if (!context)
3252 		return NULL;
3253 
3254 	memclear(create);
3255 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
3256 	if (ret != 0) {
3257 		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3258 		    strerror(errno));
3259 		free(context);
3260 		return NULL;
3261 	}
3262 
3263 	context->ctx_id = create.ctx_id;
3264 	context->bufmgr = bufmgr;
3265 
3266 	return context;
3267 }
3268 
3269 int
3270 drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
3271 {
3272 	if (ctx == NULL)
3273 		return -EINVAL;
3274 
3275 	*ctx_id = ctx->ctx_id;
3276 
3277 	return 0;
3278 }
3279 
3280 void
3281 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3282 {
3283 	drm_intel_bufmgr_gem *bufmgr_gem;
3284 	struct drm_i915_gem_context_destroy destroy;
3285 	int ret;
3286 
3287 	if (ctx == NULL)
3288 		return;
3289 
3290 	memclear(destroy);
3291 
3292 	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3293 	destroy.ctx_id = ctx->ctx_id;
3294 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3295 		       &destroy);
3296 	if (ret != 0)
3297 		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
3298 			strerror(errno));
3299 
3300 	free(ctx);
3301 }
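
/*
 * Usage sketch (illustrative only; "used" is the byte length of the batch).
 * Per-context execution keeps hardware state isolated between clients:
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *
 *	if (ctx) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */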
3302 
3303 int
3304 drm_intel_get_reset_stats(drm_intel_context *ctx,
3305 			  uint32_t *reset_count,
3306 			  uint32_t *active,
3307 			  uint32_t *pending)
3308 {
3309 	drm_intel_bufmgr_gem *bufmgr_gem;
3310 	struct drm_i915_reset_stats stats;
3311 	int ret;
3312 
3313 	if (ctx == NULL)
3314 		return -EINVAL;
3315 
3316 	memclear(stats);
3317 
3318 	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3319 	stats.ctx_id = ctx->ctx_id;
3320 	ret = drmIoctl(bufmgr_gem->fd,
3321 		       DRM_IOCTL_I915_GET_RESET_STATS,
3322 		       &stats);
3323 	if (ret == 0) {
3324 		if (reset_count != NULL)
3325 			*reset_count = stats.reset_count;
3326 
3327 		if (active != NULL)
3328 			*active = stats.batch_active;
3329 
3330 		if (pending != NULL)
3331 			*pending = stats.batch_pending;
3332 	}
3333 
3334 	return ret;
3335 }
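/*
 * Editor's note: illustrative sketch, not part of the original file, showing
 * how a caller might poll the per-context reset statistics; "ctx" is assumed
 * to be a valid drm_intel_context.
 *
 *	uint32_t resets = 0, active = 0, pending = 0;
 *
 *	if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *	    (active || pending))
 *		fprintf(stderr, "GPU hang: %u resets, %u active, %u pending batches\n",
 *			resets, active, pending);
 */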
3336 
3337 int
3338 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3339 		   uint32_t offset,
3340 		   uint64_t *result)
3341 {
3342 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3343 	struct drm_i915_reg_read reg_read;
3344 	int ret;
3345 
3346 	memclear(reg_read);
3347 	reg_read.offset = offset;
3348 
3349 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
3350 
3351 	*result = reg_read.val;
3352 	return ret;
3353 }
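/*
 * Editor's note: usage sketch, not part of the original file.  The register
 * offset below is assumed to be the render engine TIMESTAMP register (0x2358);
 * whether a given offset is readable depends on the kernel's whitelist for
 * DRM_IOCTL_I915_REG_READ, so treat it as purely illustrative.
 *
 *	uint64_t timestamp = 0;
 *
 *	if (drm_intel_reg_read(bufmgr, 0x2358, &timestamp) == 0)
 *		printf("GPU timestamp: 0x%016llx\n", (unsigned long long)timestamp);
 */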
3354 
3355 int
3356 drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3357 {
3358 	drm_i915_getparam_t gp;
3359 	int ret;
3360 
3361 	memclear(gp);
3362 	gp.value = (int*)subslice_total;
3363 	gp.param = I915_PARAM_SUBSLICE_TOTAL;
3364 	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3365 	if (ret)
3366 		return -errno;
3367 
3368 	return 0;
3369 }
3370 
3371 int
3372 drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3373 {
3374 	drm_i915_getparam_t gp;
3375 	int ret;
3376 
3377 	memclear(gp);
3378 	gp.value = (int*)eu_total;
3379 	gp.param = I915_PARAM_EU_TOTAL;
3380 	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3381 	if (ret)
3382 		return -errno;
3383 
3384 	return 0;
3385 }
3386 
3387 int
3388 drm_intel_get_pooled_eu(int fd)
3389 {
3390 	drm_i915_getparam_t gp;
3391 	int ret = -1;
3392 
3393 	memclear(gp);
3394 	gp.param = I915_PARAM_HAS_POOLED_EU;
3395 	gp.value = &ret;
3396 	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3397 		return -errno;
3398 
3399 	return ret;
3400 }
3401 
3402 int
3403 drm_intel_get_min_eu_in_pool(int fd)
3404 {
3405 	drm_i915_getparam_t gp;
3406 	int ret = -1;
3407 
3408 	memclear(gp);
3409 	gp.param = I915_PARAM_MIN_EU_IN_POOL;
3410 	gp.value = &ret;
3411 	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3412 		return -errno;
3413 
3414 	return ret;
3415 }
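/*
 * Editor's note: sketch (not part of the original file) combining the GETPARAM
 * helpers above to report the EU topology of the device behind "fd".  On
 * kernels or GPUs that lack a parameter the helpers return a negative errno,
 * so the corresponding value is simply skipped.
 *
 *	unsigned int subslices = 0, eus = 0;
 *
 *	if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
 *	    drm_intel_get_eu_total(fd, &eus) == 0)
 *		printf("%u subslices, %u EUs total\n", subslices, eus);
 *
 *	if (drm_intel_get_pooled_eu(fd) > 0)
 *		printf("pooled EU enabled, min %d EUs per pool\n",
 *		       drm_intel_get_min_eu_in_pool(fd));
 */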
3416 
3417 /**
3418  * Annotate the given bo for use in aub dumping (deprecated; now a no-op).
3419  *
3420  * \param annotations is an array of drm_intel_aub_annotation objects
3421  * describing the type of data in various sections of the bo.  Each
3422  * element of the array specifies the type and subtype of a section of
3423  * the bo, and the past-the-end offset of that section.  The elements
3424  * of \c annotations must be sorted so that ending_offset is
3425  * increasing.
3426  *
3427  * \param count is the number of elements in the \c annotations array.
3428  * If \c count is zero, then \c annotations will not be dereferenced.
3429  *
3430  * Annotations are copied into a private data structure, so caller may
3431  * re-use the memory pointed to by \c annotations after the call
3432  * returns.
3433  *
3434  * Annotations are stored for the lifetime of the bo; to reset to the
3435  * default state (no annotations), call this function with a \c count
3436  * of zero.
3437  */
3438 void
3439 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3440 					 drm_intel_aub_annotation *annotations,
3441 					 unsigned count)
3442 {
3443 }
3444 
3445 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3446 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3447 
3448 static drm_intel_bufmgr_gem *
3449 drm_intel_bufmgr_gem_find(int fd)
3450 {
3451 	drm_intel_bufmgr_gem *bufmgr_gem;
3452 
3453 	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3454 		if (bufmgr_gem->fd == fd) {
3455 			atomic_inc(&bufmgr_gem->refcount);
3456 			return bufmgr_gem;
3457 		}
3458 	}
3459 
3460 	return NULL;
3461 }
3462 
3463 static void
3464 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3465 {
3466 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3467 
3468 	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3469 		pthread_mutex_lock(&bufmgr_list_mutex);
3470 
3471 		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3472 			DRMLISTDEL(&bufmgr_gem->managers);
3473 			drm_intel_bufmgr_gem_destroy(bufmgr);
3474 		}
3475 
3476 		pthread_mutex_unlock(&bufmgr_list_mutex);
3477 	}
3478 }
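/*
 * Editor's note: illustrative sketch, not part of the original file.  Because
 * managers are looked up by fd and reference counted, two init calls on the
 * same fd share one drm_intel_bufmgr_gem, and each drm_intel_bufmgr_destroy()
 * (which drm_intel_bufmgr_gem_init() below wires to the unref function above)
 * drops one reference; the last drop tears the manager down.
 *
 *	drm_intel_bufmgr *a = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr *b = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	assert(a == b);			refcount is now 2
 *	drm_intel_bufmgr_destroy(b);	refcount drops to 1
 *	drm_intel_bufmgr_destroy(a);	last reference, resources released
 */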
3479 
3480 void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
3481 {
3482 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3483 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3484 
3485 	if (bo_gem->gtt_virtual)
3486 		return bo_gem->gtt_virtual;
3487 
3488 	if (bo_gem->is_userptr)
3489 		return NULL;
3490 
3491 	pthread_mutex_lock(&bufmgr_gem->lock);
3492 	if (bo_gem->gtt_virtual == NULL) {
3493 		struct drm_i915_gem_mmap_gtt mmap_arg;
3494 		void *ptr;
3495 
3496 		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
3497 		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3498 
3499 		if (bo_gem->map_count++ == 0)
3500 			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3501 
3502 		memclear(mmap_arg);
3503 		mmap_arg.handle = bo_gem->gem_handle;
3504 
3505 		/* Get the fake offset back... */
3506 		ptr = MAP_FAILED;
3507 		if (drmIoctl(bufmgr_gem->fd,
3508 			     DRM_IOCTL_I915_GEM_MMAP_GTT,
3509 			     &mmap_arg) == 0) {
3510 			/* and mmap it */
3511 			ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
3512 				       MAP_SHARED, bufmgr_gem->fd,
3513 				       mmap_arg.offset);
3514 		}
3515 		if (ptr == MAP_FAILED) {
3516 			if (--bo_gem->map_count == 0)
3517 				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3518 			ptr = NULL;
3519 		}
3520 
3521 		bo_gem->gtt_virtual = ptr;
3522 	}
3523 	pthread_mutex_unlock(&bufmgr_gem->lock);
3524 
3525 	return bo_gem->gtt_virtual;
3526 }
3527 
3528 void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
3529 {
3530 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3531 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3532 
3533 	if (bo_gem->mem_virtual)
3534 		return bo_gem->mem_virtual;
3535 
3536 	if (bo_gem->is_userptr) {
3537 		/* Return the same user ptr */
3538 		return bo_gem->user_virtual;
3539 	}
3540 
3541 	pthread_mutex_lock(&bufmgr_gem->lock);
3542 	if (!bo_gem->mem_virtual) {
3543 		struct drm_i915_gem_mmap mmap_arg;
3544 
3545 		if (bo_gem->map_count++ == 0)
3546 			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3547 
3548 		DBG("bo_map: %d (%s), map_count=%d\n",
3549 		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3550 
3551 		memclear(mmap_arg);
3552 		mmap_arg.handle = bo_gem->gem_handle;
3553 		mmap_arg.size = bo->size;
3554 		if (drmIoctl(bufmgr_gem->fd,
3555 			     DRM_IOCTL_I915_GEM_MMAP,
3556 			     &mmap_arg)) {
3557 			DBG("%s:%d: Error mapping buffer %d (%s): %s.\n",
3558 			    __FILE__, __LINE__, bo_gem->gem_handle,
3559 			    bo_gem->name, strerror(errno));
3560 			if (--bo_gem->map_count == 0)
3561 				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3562 		} else {
3563 			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3564 			bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3565 		}
3566 	}
3567 	pthread_mutex_unlock(&bufmgr_gem->lock);
3568 
3569 	return bo_gem->mem_virtual;
3570 }
3571 
3572 void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
3573 {
3574 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3575 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3576 
3577 	if (bo_gem->wc_virtual)
3578 		return bo_gem->wc_virtual;
3579 
3580 	if (bo_gem->is_userptr)
3581 		return NULL;
3582 
3583 	pthread_mutex_lock(&bufmgr_gem->lock);
3584 	if (!bo_gem->wc_virtual) {
3585 		struct drm_i915_gem_mmap mmap_arg;
3586 
3587 		if (bo_gem->map_count++ == 0)
3588 			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3589 
3590 		DBG("bo_map: %d (%s), map_count=%d\n",
3591 		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3592 
3593 		memclear(mmap_arg);
3594 		mmap_arg.handle = bo_gem->gem_handle;
3595 		mmap_arg.size = bo->size;
3596 		mmap_arg.flags = I915_MMAP_WC;
3597 		if (drmIoctl(bufmgr_gem->fd,
3598 			     DRM_IOCTL_I915_GEM_MMAP,
3599 			     &mmap_arg)) {
3600 			DBG("%s:%d: Error mapping buffer %d (%s): %s.\n",
3601 			    __FILE__, __LINE__, bo_gem->gem_handle,
3602 			    bo_gem->name, strerror(errno));
3603 			if (--bo_gem->map_count == 0)
3604 				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3605 		} else {
3606 			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3607 			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3608 		}
3609 	}
3610 	pthread_mutex_unlock(&bufmgr_gem->lock);
3611 
3612 	return bo_gem->wc_virtual;
3613 }
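/*
 * Editor's note: usage sketch, not part of the original file.  The three
 * map__gtt/__cpu/__wc helpers above return cached mappings without setting the
 * GEM domain, so the caller is responsible for ordering against the GPU, e.g.
 * by waiting for rendering before touching the pages.
 *
 *	void *ptr = drm_intel_gem_bo_map__cpu(bo);
 *	if (ptr) {
 *		drm_intel_bo_wait_rendering(bo);
 *		memcpy(ptr, data, size);
 *	}
 */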
3614 
3615 /**
3616  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3617  * and manage buffer objects.
3618  *
3619  * \param fd File descriptor of the opened DRM device.
3620  */
3621 drm_intel_bufmgr *
3622 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3623 {
3624 	drm_intel_bufmgr_gem *bufmgr_gem;
3625 	struct drm_i915_gem_get_aperture aperture;
3626 	drm_i915_getparam_t gp;
3627 	int ret, tmp;
3628 	bool exec2 = false;
3629 
3630 	pthread_mutex_lock(&bufmgr_list_mutex);
3631 
3632 	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3633 	if (bufmgr_gem)
3634 		goto exit;
3635 
3636 	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3637 	if (bufmgr_gem == NULL)
3638 		goto exit;
3639 
3640 	bufmgr_gem->fd = fd;
3641 	atomic_set(&bufmgr_gem->refcount, 1);
3642 
3643 	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3644 		free(bufmgr_gem);
3645 		bufmgr_gem = NULL;
3646 		goto exit;
3647 	}
3648 
3649 	memclear(aperture);
3650 	ret = drmIoctl(bufmgr_gem->fd,
3651 		       DRM_IOCTL_I915_GEM_GET_APERTURE,
3652 		       &aperture);
3653 
3654 	if (ret == 0)
3655 		bufmgr_gem->gtt_size = aperture.aper_available_size;
3656 	else {
3657 		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
3658 			strerror(errno));
3659 		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3660 		fprintf(stderr, "Assuming %dkB available aperture size.\n"
3661 			"May lead to reduced performance or incorrect "
3662 			"rendering.\n",
3663 			(int)bufmgr_gem->gtt_size / 1024);
3664 	}
3665 
3666 	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3667 
3668 	if (IS_GEN2(bufmgr_gem->pci_device))
3669 		bufmgr_gem->gen = 2;
3670 	else if (IS_GEN3(bufmgr_gem->pci_device))
3671 		bufmgr_gem->gen = 3;
3672 	else if (IS_GEN4(bufmgr_gem->pci_device))
3673 		bufmgr_gem->gen = 4;
3674 	else if (IS_GEN5(bufmgr_gem->pci_device))
3675 		bufmgr_gem->gen = 5;
3676 	else if (IS_GEN6(bufmgr_gem->pci_device))
3677 		bufmgr_gem->gen = 6;
3678 	else if (IS_GEN7(bufmgr_gem->pci_device))
3679 		bufmgr_gem->gen = 7;
3680 	else if (IS_GEN8(bufmgr_gem->pci_device))
3681 		bufmgr_gem->gen = 8;
3682 	else if (IS_GEN9(bufmgr_gem->pci_device))
3683 		bufmgr_gem->gen = 9;
3684 	else {
3685 		free(bufmgr_gem);
3686 		bufmgr_gem = NULL;
3687 		goto exit;
3688 	}
3689 
3690 	if (IS_GEN3(bufmgr_gem->pci_device) &&
3691 	    bufmgr_gem->gtt_size > 256*1024*1024) {
3692 		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3693 		 * be used for tiled blits. To simplify the accounting, just
3694 		 * subtract the unmappable part (fixed to 256MB on all known
3695 		 * gen3 devices) if the kernel advertises it. */
3696 		bufmgr_gem->gtt_size -= 256*1024*1024;
3697 	}
3698 
3699 	memclear(gp);
3700 	gp.value = &tmp;
3701 
3702 	gp.param = I915_PARAM_HAS_EXECBUF2;
3703 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3704 	if (!ret)
3705 		exec2 = true;
3706 
3707 	gp.param = I915_PARAM_HAS_BSD;
3708 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3709 	bufmgr_gem->has_bsd = ret == 0;
3710 
3711 	gp.param = I915_PARAM_HAS_BLT;
3712 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3713 	bufmgr_gem->has_blt = ret == 0;
3714 
3715 	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3716 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3717 	bufmgr_gem->has_relaxed_fencing = ret == 0;
3718 
3719 	gp.param = I915_PARAM_HAS_EXEC_ASYNC;
3720 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3721 	bufmgr_gem->has_exec_async = ret == 0;
3722 
3723 	bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3724 
3725 	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3726 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3727 	bufmgr_gem->has_wait_timeout = ret == 0;
3728 
3729 	gp.param = I915_PARAM_HAS_LLC;
3730 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3731 	if (ret != 0) {
3732 		/* Kernel does not support the HAS_LLC query; fall back to GPU
3733 		 * generation detection and assume that we have LLC on GEN6/7.
3734 		 */
3735 		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
3736 				IS_GEN7(bufmgr_gem->pci_device));
3737 	} else
3738 		bufmgr_gem->has_llc = *gp.value;
3739 
3740 	gp.param = I915_PARAM_HAS_VEBOX;
3741 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3742 	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3743 
3744 	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
3745 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3746 	if (ret == 0 && *gp.value > 0)
3747 		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
3748 
3749 	if (bufmgr_gem->gen < 4) {
3750 		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3751 		gp.value = &bufmgr_gem->available_fences;
3752 		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3753 		if (ret) {
3754 			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3755 				errno);
3756 			fprintf(stderr, "param: %d, val: %d\n", gp.param,
3757 				*gp.value);
3758 			bufmgr_gem->available_fences = 0;
3759 		} else {
3760 			/* XXX The kernel reports the total number of fences,
3761 			 * including any that may be pinned.
3762 			 *
3763 			 * We presume that there will be at least one pinned
3764 			 * fence for the scanout buffer, but there may be more
3765 			 * than one scanout and the user may be manually
3766 			 * pinning buffers. Let's move to execbuffer2 and
3767 			 * thereby forget the insanity of using fences...
3768 			 */
3769 			bufmgr_gem->available_fences -= 2;
3770 			if (bufmgr_gem->available_fences < 0)
3771 				bufmgr_gem->available_fences = 0;
3772 		}
3773 	}
3774 
3775 	if (bufmgr_gem->gen >= 8) {
3776 		gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
3777 		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3778 		if (ret == 0 && *gp.value == 3)
3779 			bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
3780 	}
3781 
3782 	/* Let's go with one relocation for every 2 dwords (but round down a bit
3783 	 * since a power of two will mean an extra page allocation for the reloc
3784 	 * buffer).
3785 	 *
3786 	 * Every 4 was too few for the blender benchmark.
3787 	 */
3788 	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
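	/*
	 * Editor's note (worked example): with a typical 16 KiB batch this gives
	 * 16384 / 4 / 2 - 2 = 2046 relocations, i.e. just under the power of two
	 * 2048, which keeps the relocation buffer from spilling into an extra
	 * page as the comment above explains.
	 */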
3789 
3790 	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3791 	bufmgr_gem->bufmgr.bo_alloc_for_render =
3792 	    drm_intel_gem_bo_alloc_for_render;
3793 	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3794 	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3795 	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3796 	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3797 	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3798 	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3799 	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3800 	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3801 	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3802 	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3803 	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3804 	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3805 	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3806 	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3807 	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3808 	/* Use the new one if available */
3809 	if (exec2) {
3810 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3811 		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3812 	} else
3813 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3814 	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3815 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3816 	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3817 	bufmgr_gem->bufmgr.debug = 0;
3818 	bufmgr_gem->bufmgr.check_aperture_space =
3819 	    drm_intel_gem_check_aperture_space;
3820 	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3821 	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3822 	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3823 	    drm_intel_gem_get_pipe_from_crtc_id;
3824 	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3825 
3826 	init_cache_buckets(bufmgr_gem);
3827 
3828 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3829 	bufmgr_gem->vma_max = -1; /* unlimited by default */
3830 
3831 	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3832 
3833 exit:
3834 	pthread_mutex_unlock(&bufmgr_list_mutex);
3835 
3836 	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
3837 }
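/*
 * Editor's note: a minimal initialization sketch, not part of the original
 * file.  The device path and the 16 KiB batch size are assumptions; real
 * callers usually get the fd from their window system and pass the batch size
 * their batchbuffer code actually uses.
 *
 *	#include <fcntl.h>
 *	#include "intel_bufmgr.h"
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *
 *	if (bufmgr) {
 *		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *		drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *		drm_intel_bo_unreference(bo);
 *		drm_intel_bufmgr_destroy(bufmgr);
 *	}
 */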
3838