/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BUFMGR_H
#define IRIS_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#include "c11/threads.h"
#include "common/intel_bind_timeline.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "util/u_dynarray.h"
#include "util/list.h"
#include "util/simple_mtx.h"
#include "pipe/p_defines.h"
#include "pipebuffer/pb_slab.h"
#include "intel/dev/intel_device_info.h"

struct intel_device_info;
struct util_debug_callback;
struct isl_surf;
struct iris_syncobj;

/**
 * Memory zones.  When allocating a buffer, you can request that it is
 * placed into a specific region of the virtual address space (PPGTT).
 *
 * Most buffers can go anywhere (IRIS_MEMZONE_OTHER).  Some buffers are
 * accessed via an offset from a base address.  STATE_BASE_ADDRESS has
 * a maximum 4GB size for each region, so we need to restrict those
 * buffers to be within 4GB of the base.  Each memory zone corresponds
 * to a particular base address.
 *
 * We lay out the virtual address space as follows:
 *
 * - [0,   4K): Nothing            (empty page for null address)
 * - [4K,  4G): Shaders            (Instruction Base Address)
 * - [4G,  8G): Surfaces & Binders (Surface State Base Address, Bindless ...)
 * - [8G, 12G): Dynamic            (Dynamic State Base Address)
 * - [12G, *):  Other              (everything else in the full 48-bit VMA)
 *
 * A special buffer for border color lives at the start of the dynamic state
 * memory zone.  This unfortunately has to be handled specially because the
 * SAMPLER_STATE "Indirect State Pointer" field is only a 24-bit pointer.
 *
 * Each GL context uses a separate GEM context, which technically gives them
 * each a separate VMA.  However, we assign addresses globally, so buffers
 * will have the same address in all GEM contexts.  This lets us have a
 * single BO field for the address, which is easy and cheap.
 */
enum iris_memory_zone {
   IRIS_MEMZONE_SHADER,
   IRIS_MEMZONE_BINDER,
   IRIS_MEMZONE_SCRATCH,
   IRIS_MEMZONE_SURFACE,
   IRIS_MEMZONE_DYNAMIC,
   IRIS_MEMZONE_OTHER,

   IRIS_MEMZONE_BORDER_COLOR_POOL,
};

/* Intentionally exclude single buffer "zones" */
#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 1)

#define IRIS_SCRATCH_ZONE_SIZE (8 * 1024 * 1024)
#define IRIS_BINDER_ZONE_SIZE  ((1ull << 30) - IRIS_SCRATCH_ZONE_SIZE)

#define IRIS_MEMZONE_SHADER_START  (0ull * (1ull << 32))
#define IRIS_MEMZONE_BINDER_START  (1ull * (1ull << 32))
#define IRIS_MEMZONE_SCRATCH_START IRIS_MEMZONE_BINDER_START
#define IRIS_MEMZONE_SURFACE_START (IRIS_MEMZONE_BINDER_START + IRIS_BINDER_ZONE_SIZE)
#define IRIS_MEMZONE_DYNAMIC_START (2ull * (1ull << 32))
#define IRIS_MEMZONE_OTHER_START   (3ull * (1ull << 32))

#define IRIS_BORDER_COLOR_POOL_ADDRESS IRIS_MEMZONE_DYNAMIC_START
#define IRIS_BORDER_COLOR_POOL_SIZE (64 * 4096)
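
/* Illustrative sketch (not actual driver code): classifying an address into
 * a memory zone following the layout above.  This mirrors what the real
 * iris_memzone_for_address() (declared later in this header) does, but
 * omits the scratch sub-zone carved out of the start of the binder range.
 *
 *    static inline enum iris_memory_zone
 *    example_memzone_for_address(uint64_t address)
 *    {
 *       if (address >= IRIS_MEMZONE_OTHER_START)
 *          return IRIS_MEMZONE_OTHER;
 *       if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
 *          return IRIS_MEMZONE_BORDER_COLOR_POOL;
 *       if (address > IRIS_MEMZONE_DYNAMIC_START)
 *          return IRIS_MEMZONE_DYNAMIC;
 *       if (address >= IRIS_MEMZONE_SURFACE_START)
 *          return IRIS_MEMZONE_SURFACE;
 *       if (address >= IRIS_MEMZONE_BINDER_START)
 *          return IRIS_MEMZONE_BINDER;
 *       return IRIS_MEMZONE_SHADER;
 *    }
 */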

/**
 * Classification of the various incoherent caches of the GPU into a number of
 * caching domains.
 */
enum iris_domain {
   /** Render color cache. */
   IRIS_DOMAIN_RENDER_WRITE = 0,
   /** (Hi)Z/stencil cache. */
   IRIS_DOMAIN_DEPTH_WRITE,
   /** Data port (HDC) cache. */
   IRIS_DOMAIN_DATA_WRITE,
   /** Any other read-write cache. */
   IRIS_DOMAIN_OTHER_WRITE,
   /** Vertex cache. */
   IRIS_DOMAIN_VF_READ,
   /** Texture cache. */
   IRIS_DOMAIN_SAMPLER_READ,
   /** Pull-style shader constant loads. */
   IRIS_DOMAIN_PULL_CONSTANT_READ,
   /** Any other read-only cache, including reads from non-L3 clients. */
   IRIS_DOMAIN_OTHER_READ,
   /** Number of caching domains. */
   NUM_IRIS_DOMAINS,
   /** Not a real cache, used to opt out of the cache tracking mechanism. */
   IRIS_DOMAIN_NONE = NUM_IRIS_DOMAINS
};

/**
 * Whether a caching domain is guaranteed not to write any data to memory.
 */
static inline bool
iris_domain_is_read_only(enum iris_domain access)
{
   return access >= IRIS_DOMAIN_VF_READ &&
          access <= IRIS_DOMAIN_OTHER_READ;
}

static inline bool
iris_domain_is_l3_coherent(const struct intel_device_info *devinfo,
                           enum iris_domain access)
{
   /* VF reads are coherent with the L3 on Tigerlake+ because we set
    * the "L3 Bypass Disable" bit in the vertex/index buffer packets.
    */
   if (access == IRIS_DOMAIN_VF_READ)
      return devinfo->ver >= 12;

   return access != IRIS_DOMAIN_OTHER_WRITE &&
          access != IRIS_DOMAIN_OTHER_READ;
}

enum iris_mmap_mode {
   IRIS_MMAP_NONE, /**< Cannot be mapped */
   IRIS_MMAP_UC,   /**< Fully uncached memory map */
   IRIS_MMAP_WC,   /**< Write-combining map with no caching of reads */
   IRIS_MMAP_WB,   /**< Write-back mapping with CPU caches enabled */
};

enum iris_heap {
   /**
    * System memory which is CPU-cached and (at least 1-way) coherent.
    *
    * This will use WB (write-back) CPU mappings.
    *
    * LLC systems and discrete cards (which enable snooping) will mostly use
    * this heap.  Non-LLC systems will only use it when explicit coherency is
    * required, as snooping is expensive there.
    */
   IRIS_HEAP_SYSTEM_MEMORY_CACHED_COHERENT,

   /**
    * System memory which is not CPU-cached.
    *
    * This will use WC (write-combining) CPU mappings, which makes reads
    * uncached and therefore slow.  This can be used for scanout on
    * integrated GPUs (scanout is never coherent with CPU caches).  It will
    * be used for most buffers on non-LLC platforms, where cache coherency
    * is expensive.
    */
   IRIS_HEAP_SYSTEM_MEMORY_UNCACHED,

   /** Device-local memory (VRAM).  Cannot be placed in system memory! */
   IRIS_HEAP_DEVICE_LOCAL,

   /** Device-local memory that may be evicted to system memory if needed. */
   IRIS_HEAP_DEVICE_LOCAL_PREFERRED,

   IRIS_HEAP_MAX,
};
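
/* A rough pairing of heaps to mmap modes, inferred from the comments above
 * (an assumption for illustration only; the authoritative policy lives in
 * the buffer manager implementation):
 *
 *    IRIS_HEAP_SYSTEM_MEMORY_CACHED_COHERENT -> IRIS_MMAP_WB
 *    IRIS_HEAP_SYSTEM_MEMORY_UNCACHED        -> IRIS_MMAP_WC
 *    IRIS_HEAP_DEVICE_LOCAL(_PREFERRED)      -> IRIS_MMAP_WC, or IRIS_MMAP_NONE
 *                                               when no CPU mapping exists
 */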

extern const char *iris_heap_to_string[];

static inline bool
iris_heap_is_device_local(enum iris_heap heap)
{
   return heap == IRIS_HEAP_DEVICE_LOCAL ||
          heap == IRIS_HEAP_DEVICE_LOCAL_PREFERRED;
}

#define IRIS_BATCH_COUNT 3

struct iris_bo_screen_deps {
   struct iris_syncobj *write_syncobjs[IRIS_BATCH_COUNT];
   struct iris_syncobj *read_syncobjs[IRIS_BATCH_COUNT];
};

struct iris_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /** Buffer manager context associated with this buffer object */
   struct iris_bufmgr *bufmgr;

   /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
   uint32_t hash;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Canonical virtual address of the buffer inside the PPGTT (Per-Process
    * Graphics Translation Table).
    *
    * Although each hardware context has its own VMA, we assign BOs to the
    * same address in all contexts, for simplicity.
    */
   uint64_t address;

   /**
    * If non-zero, then this bo has an aux-map translation to this address.
    */
   uint64_t aux_map_address;

   /**
    * If this BO is referenced by a batch, this _may_ be the index into the
    * batch->exec_bos[] list.
    *
    * Note that a single buffer may be used by multiple batches/contexts,
    * and thus appear in multiple lists, but we only track one index here.
    * In the common case one can guess that batch->exec_bos[bo->index] == bo
    * and double check if that's true to avoid a linear list walk.
    *
    * XXX: this is not ideal now that we have more than one batch per context,
    * XXX: as the index will flop back and forth between the render index and
    * XXX: compute index...
    */
   unsigned index;

   int refcount;
   const char *name;

   /** BO cache list */
   struct list_head head;

   /**
    * Synchronization sequence number of most recent access of this BO from
    * each caching domain.
    *
    * Although this is a global field, use in multiple contexts should be
    * safe, see iris_emit_buffer_barrier_for() for details.
    *
    * Also align it to 64 bits.  This will make atomic operations faster on
    * 32-bit platforms.
    */
   alignas(8) uint64_t last_seqnos[NUM_IRIS_DOMAINS];

   /** Up to one per screen, may need realloc. */
   struct iris_bo_screen_deps *deps;
   int deps_size;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   /** Was this buffer zeroed at allocation time? */
   bool zeroed;

   union {
      struct {
         time_t free_time;

         /** Mapped address for the buffer, saved across map/unmap cycles */
         void *map;

         /** List of GEM handle exports of this buffer (bo_export) */
         struct list_head exports;

         /**
          * Kernel-assigned global name for this object
          *
          * List contains both flink named and prime fd'd objects
          */
         unsigned global_name;

         /** Prime fd used for shared buffers, -1 otherwise. */
         int prime_fd;

         /** The mmap coherency mode selected at BO allocation time */
         enum iris_mmap_mode mmap_mode;

         /** The heap selected at BO allocation time */
         enum iris_heap heap;

         /** Was this buffer imported from an external client? */
         bool imported;

         /** Has this buffer been exported to external clients? */
         bool exported;

         /** Boolean of whether this buffer can be re-used */
         bool reusable;

         /** Boolean of whether this buffer points into user memory */
         bool userptr;

         /** Boolean of whether this buffer is protected (HW encryption) */
         bool protected;

         /**
          * Boolean of whether this buffer needs to be captured in error
          * dumps.  The Xe KMD requires this to be set before vm bind, while
          * i915 needs it set before batch_submit().
          */
         bool capture;
      } real;
      struct {
         struct pb_slab_entry entry;
         struct iris_bo *real;
      } slab;
   };
};

#define BO_ALLOC_PLAIN       0
#define BO_ALLOC_ZEROED      (1<<0)
#define BO_ALLOC_COHERENT    (1<<1)
#define BO_ALLOC_SMEM        (1<<2)
#define BO_ALLOC_SCANOUT     (1<<3)
#define BO_ALLOC_NO_SUBALLOC (1<<4)
#define BO_ALLOC_LMEM        (1<<5)
#define BO_ALLOC_PROTECTED   (1<<6)
#define BO_ALLOC_SHARED      (1<<7)
#define BO_ALLOC_CAPTURE     (1<<8)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture.  They must be mapped
 * using iris_bo_map() to be used by the CPU.
 */
struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
                              const char *name,
                              uint64_t size,
                              uint32_t alignment,
                              enum iris_memory_zone memzone,
                              unsigned flags);
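
/* Example usage (illustrative only; the name and sizes are hypothetical and
 * error handling is elided):
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "tmp vertex data", 64 * 1024, 64,
 *                     IRIS_MEMZONE_OTHER, BO_ALLOC_ZEROED);
 */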

struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone);

/** Takes a reference on a buffer object */
static inline void
iris_bo_reference(struct iris_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void iris_bo_unreference(struct iris_bo *bo);

#define MAP_READ          PIPE_MAP_READ
#define MAP_WRITE         PIPE_MAP_WRITE
#define MAP_ASYNC         PIPE_MAP_UNSYNCHRONIZED
#define MAP_PERSISTENT    PIPE_MAP_PERSISTENT
#define MAP_COHERENT      PIPE_MAP_COHERENT
/* internal */
#define MAP_RAW           (PIPE_MAP_DRV_PRV << 0)
#define MAP_INTERNAL_MASK (MAP_RAW)

#define MAP_FLAGS (MAP_READ | MAP_WRITE | MAP_ASYNC | \
                   MAP_PERSISTENT | MAP_COHERENT | MAP_INTERNAL_MASK)

/**
 * Maps the buffer into userspace.
 *
 * This function will block waiting for any existing execution on the
 * buffer to complete, first.  The resulting mapping is returned.
 */
MUST_CHECK void *iris_bo_map(struct util_debug_callback *dbg,
                             struct iris_bo *bo, unsigned flags);
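
/* Example (illustrative): uploading data from the CPU.  Passing a NULL debug
 * callback is assumed to be acceptable here; `data` and `data_size` are
 * hypothetical.
 *
 *    void *map = iris_bo_map(NULL, bo, MAP_WRITE);
 *    if (map)
 *       memcpy(map, data, data_size);
 */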

/**
 * Releases a mapping of the buffer.
 *
 * This is currently a no-op: the mapping is cached in the BO (see the
 * real.map field) and persists until the buffer is freed.
 */
static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }

/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc.  It is merely a way for the driver to implement
 * glFinish.
 */
void iris_bo_wait_rendering(struct iris_bo *bo);


/**
 * Unref a buffer manager instance.
 */
void iris_bufmgr_unref(struct iris_bufmgr *bufmgr);

/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param bo   Buffer to create a name for
 * \param name Returned name
 */
int iris_bo_flink(struct iris_bo *bo, uint32_t *name);

/**
 * Returns true if the BO is backed by a real GEM object, false if it's
 * a wrapper that's suballocated from a larger BO.
 */
static inline bool
iris_bo_is_real(struct iris_bo *bo)
{
   return bo->gem_handle != 0;
}

/**
 * Unwrap any slab-allocated wrapper BOs to get the BO for the underlying
 * backing storage, which is a real BO associated with a GEM object.
 */
static inline struct iris_bo *
iris_get_backing_bo(struct iris_bo *bo)
{
   if (!iris_bo_is_real(bo))
      bo = bo->slab.real;

   /* We only allow one level of wrapping. */
   assert(iris_bo_is_real(bo));

   return bo;
}

/**
 * Is this buffer shared with external clients (imported or exported)?
 */
static inline bool
iris_bo_is_external(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.exported || bo->real.imported;
}

static inline bool
iris_bo_is_imported(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.imported;
}

static inline bool
iris_bo_is_exported(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.exported;
}

/**
 * True if the BO prefers to reside in device-local memory.
 *
 * We don't consider eviction here; this is meant to be a performance hint.
 * It will return true for BOs allocated from the LMEM or LMEM+SMEM heaps,
 * even if the buffer has been temporarily evicted to system memory.
 */
static inline bool
iris_bo_likely_local(const struct iris_bo *bo)
{
   if (!bo)
      return false;

   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return iris_heap_is_device_local(bo->real.heap);
}

static inline enum iris_mmap_mode
iris_bo_mmap_mode(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.mmap_mode;
}

/**
 * Mark a buffer as being shared with other external clients.
 */
void iris_bo_mark_exported(struct iris_bo *bo);

/**
 * Returns true if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
bool iris_bo_busy(struct iris_bo *bo);

struct iris_bufmgr *iris_bufmgr_get_for_fd(int fd, bool bo_reuse);
int iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr);

struct iris_bo *iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                                             const char *name,
                                             unsigned handle);

void *iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr);

int iris_gem_get_tiling(struct iris_bo *bo, uint32_t *tiling);
int iris_gem_set_tiling(struct iris_bo *bo, const struct isl_surf *surf);

int iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd);
struct iris_bo *iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd,
                                      const uint64_t modifier);
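
/* Example (illustrative): exporting a buffer for sharing with another
 * process.  Assumes a 0 return means success, matching the convention of
 * the other export functions here; send_fd_to_peer() is a hypothetical
 * IPC helper.
 *
 *    int fd = -1;
 *    if (iris_bo_export_dmabuf(bo, &fd) == 0) {
 *       send_fd_to_peer(fd);
 *       close(fd);
 *    }
 */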

/**
 * Exports a bo as a GEM handle into a given DRM file descriptor
 * \param bo         Buffer to export
 * \param drm_fd     File descriptor where the new handle is created
 * \param out_handle Pointer to store the new handle
 *
 * Returns 0 if the buffer was successfully exported, a non-zero error code
 * otherwise.
 */
int iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
                                         uint32_t *out_handle);

/**
 * Returns the BO's address relative to the appropriate base address.
 *
 * All of our base addresses are programmed to the start of a 4GB region,
 * so simply returning the bottom 32 bits of the BO address will give us
 * the offset from whatever base address corresponds to that memory region.
 */
static inline uint32_t
iris_bo_offset_from_base_address(struct iris_bo *bo)
{
   /* This only works for buffers in the memory zones corresponding to a
    * base address - the top, unbounded memory zone doesn't have a base.
    */
   assert(bo->address < IRIS_MEMZONE_OTHER_START);
   return bo->address;
}
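
/* Worked example (illustrative): the dynamic state zone starts at 8G
 * (0x200000000), which is where Dynamic State Base Address points.  A BO at
 * address 0x200001000 therefore yields an offset of 0x1000 from that base,
 * which is exactly the truncation to the bottom 32 bits performed above.
 */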

/**
 * Track access of a BO from the specified caching domain and sequence number.
 *
 * Can be used without locking.  Only the most recent access (i.e. highest
 * seqno) is tracked.
 */
static inline void
iris_bo_bump_seqno(struct iris_bo *bo, uint64_t seqno,
                   enum iris_domain type)
{
   uint64_t *const last_seqno = &bo->last_seqnos[type];
   uint64_t tmp, prev_seqno = p_atomic_read(last_seqno);

   /* Lock-free maximum: keep retrying the compare-and-swap until either our
    * seqno is no longer the newest, or the CAS succeeds in storing it.
    */
   while (prev_seqno < seqno &&
          prev_seqno != (tmp = p_atomic_cmpxchg(last_seqno, prev_seqno, seqno)))
      prev_seqno = tmp;
}

/**
 * Return the PAT entry for the given heap.
 */
const struct intel_device_info_pat_entry *
iris_heap_to_pat_entry(const struct intel_device_info *devinfo,
                       enum iris_heap heap);

enum iris_memory_zone iris_memzone_for_address(uint64_t address);

int iris_bufmgr_create_screen_id(struct iris_bufmgr *bufmgr);

simple_mtx_t *iris_bufmgr_get_bo_deps_lock(struct iris_bufmgr *bufmgr);

/**
 * A pool containing SAMPLER_BORDER_COLOR_STATE entries.
 *
 * See iris_border_color.c for more information.
 */
struct iris_border_color_pool {
   struct iris_bo *bo;
   void *map;
   unsigned insert_point;

   /** Map from border colors to offsets in the buffer. */
   struct hash_table *ht;

   /** Protects insert_point and the hash table. */
   simple_mtx_t lock;
};

struct iris_border_color_pool *iris_bufmgr_get_border_color_pool(
   struct iris_bufmgr *bufmgr);

/* iris_border_color.c */
void iris_init_border_color_pool(struct iris_bufmgr *bufmgr,
                                 struct iris_border_color_pool *pool);
void iris_destroy_border_color_pool(struct iris_border_color_pool *pool);
uint32_t iris_upload_border_color(struct iris_border_color_pool *pool,
                                  union pipe_color_union *color);

uint64_t iris_bufmgr_vram_size(struct iris_bufmgr *bufmgr);
uint64_t iris_bufmgr_sram_size(struct iris_bufmgr *bufmgr);
const struct intel_device_info *iris_bufmgr_get_device_info(struct iris_bufmgr *bufmgr);
const struct iris_kmd_backend *
iris_bufmgr_get_kernel_driver_backend(struct iris_bufmgr *bufmgr);
uint32_t iris_bufmgr_get_global_vm_id(struct iris_bufmgr *bufmgr);
bool iris_bufmgr_use_global_vm_id(struct iris_bufmgr *bufmgr);
struct intel_bind_timeline *iris_bufmgr_get_bind_timeline(struct iris_bufmgr *bufmgr);
bool iris_bufmgr_compute_engine_supported(struct iris_bufmgr *bufmgr);

enum iris_madvice {
   IRIS_MADVICE_WILL_NEED = 0,
   IRIS_MADVICE_DONT_NEED = 1,
};

void iris_bo_import_sync_state(struct iris_bo *bo, int sync_file_fd);
struct iris_syncobj *iris_bo_export_sync_state(struct iris_bo *bo);

#endif /* IRIS_BUFMGR_H */