/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BUFMGR_H
#define IRIS_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#include "c11/threads.h"
#include "common/intel_bind_timeline.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "util/u_dynarray.h"
#include "util/list.h"
#include "util/simple_mtx.h"
#include "pipe/p_defines.h"
#include "pipebuffer/pb_slab.h"
#include "intel/dev/intel_device_info.h"

struct intel_device_info;
struct util_debug_callback;
struct isl_surf;
struct iris_syncobj;

/**
 * Memory zones.  When allocating a buffer, you can request that it be
 * placed into a specific region of the virtual address space (PPGTT).
 *
 * Most buffers can go anywhere (IRIS_MEMZONE_OTHER).  Some buffers are
 * accessed via an offset from a base address.  STATE_BASE_ADDRESS has
 * a maximum 4GB size for each region, so we need to restrict those
 * buffers to be within 4GB of the base.  Each memory zone corresponds
 * to a particular base address.
 *
 * We lay out the virtual address space as follows:
 *
 * - [0,   4K): Nothing            (empty page for null address)
 * - [4K,  4G): Shaders            (Instruction Base Address)
 * - [4G,  8G): Surfaces & Binders (Surface State Base Address, Bindless ...)
 * - [8G, 12G): Dynamic            (Dynamic State Base Address)
 * - [12G, *):  Other              (everything else in the full 48-bit VMA)
 *
 * A special buffer for border color lives at the start of the dynamic state
 * memory zone.  This unfortunately has to be handled specially because the
 * SAMPLER_STATE "Indirect State Pointer" field is only a 24-bit pointer.
 *
 * Each GL context uses a separate GEM context, which technically gives them
 * each a separate VMA.  However, we assign addresses globally, so buffers
 * will have the same address in all GEM contexts.  This lets us have a
 * single BO field for the address, which is easy and cheap.
 */
enum iris_memory_zone {
   IRIS_MEMZONE_SHADER,
   IRIS_MEMZONE_BINDER,
   IRIS_MEMZONE_SCRATCH,
   IRIS_MEMZONE_SURFACE,
   IRIS_MEMZONE_DYNAMIC,
   IRIS_MEMZONE_OTHER,

   IRIS_MEMZONE_BORDER_COLOR_POOL,
};

/* Intentionally exclude single buffer "zones" */
#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 1)

#define IRIS_SCRATCH_ZONE_SIZE (8 * 1024 * 1024)
#define IRIS_BINDER_ZONE_SIZE ((1ull << 30) - IRIS_SCRATCH_ZONE_SIZE)

#define IRIS_MEMZONE_SHADER_START     (0ull * (1ull << 32))
#define IRIS_MEMZONE_BINDER_START     (1ull * (1ull << 32))
#define IRIS_MEMZONE_SCRATCH_START    IRIS_MEMZONE_BINDER_START
#define IRIS_MEMZONE_SURFACE_START    (IRIS_MEMZONE_BINDER_START + IRIS_BINDER_ZONE_SIZE)
#define IRIS_MEMZONE_DYNAMIC_START    (2ull * (1ull << 32))
#define IRIS_MEMZONE_OTHER_START      (3ull * (1ull << 32))

#define IRIS_BORDER_COLOR_POOL_ADDRESS IRIS_MEMZONE_DYNAMIC_START
#define IRIS_BORDER_COLOR_POOL_SIZE (64 * 4096)
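
/* Worked example of the layout arithmetic above (illustrative annotation,
 * not part of the upstream header):
 *
 *    IRIS_MEMZONE_BINDER_START  = 1 * 2^32       = 0x100000000   (4G)
 *    IRIS_BINDER_ZONE_SIZE      = 2^30 - 8M      = 0x3F800000
 *    IRIS_MEMZONE_SURFACE_START = 4G + (1G - 8M) = 0x13F800000
 *    IRIS_MEMZONE_DYNAMIC_START = 2 * 2^32       = 0x200000000   (8G)
 *
 * so the binder and surface zones together span [4G, 8G), matching the
 * "Surfaces & Binders" row of the address map in the comment above.
 */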

/**
 * Classification of the various incoherent caches of the GPU into a number of
 * caching domains.
 */
enum iris_domain {
   /** Render color cache. */
   IRIS_DOMAIN_RENDER_WRITE = 0,
   /** (Hi)Z/stencil cache. */
   IRIS_DOMAIN_DEPTH_WRITE,
   /** Data port (HDC) cache. */
   IRIS_DOMAIN_DATA_WRITE,
   /** Any other read-write cache. */
   IRIS_DOMAIN_OTHER_WRITE,
   /** Vertex cache. */
   IRIS_DOMAIN_VF_READ,
   /** Texture cache. */
   IRIS_DOMAIN_SAMPLER_READ,
   /** Pull-style shader constant loads. */
   IRIS_DOMAIN_PULL_CONSTANT_READ,
   /** Any other read-only cache, including reads from non-L3 clients. */
   IRIS_DOMAIN_OTHER_READ,
   /** Number of caching domains. */
   NUM_IRIS_DOMAINS,
   /** Not a real cache; used to opt out of the cache tracking mechanism. */
   IRIS_DOMAIN_NONE = NUM_IRIS_DOMAINS
};

/**
 * Whether a caching domain is guaranteed not to write any data to memory.
 */
static inline bool
iris_domain_is_read_only(enum iris_domain access)
{
   return access >= IRIS_DOMAIN_VF_READ &&
          access <= IRIS_DOMAIN_OTHER_READ;
}

static inline bool
iris_domain_is_l3_coherent(const struct intel_device_info *devinfo,
                           enum iris_domain access)
{
   /* VF reads are coherent with the L3 on Tigerlake+ because we set
    * the "L3 Bypass Disable" bit in the vertex/index buffer packets.
    */
   if (access == IRIS_DOMAIN_VF_READ)
      return devinfo->ver >= 12;

   return access != IRIS_DOMAIN_OTHER_WRITE &&
          access != IRIS_DOMAIN_OTHER_READ;
}
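
/* Illustrative sketch (an annotation added here, not upstream code): a
 * caller deciding whether two accesses need a flush between them might
 * combine these predicates roughly as follows, where `devinfo`, `producer`,
 * and `consumer` are hypothetical variables:
 *
 *    bool producer_writes = !iris_domain_is_read_only(producer);
 *    bool both_l3 = iris_domain_is_l3_coherent(devinfo, producer) &&
 *                   iris_domain_is_l3_coherent(devinfo, consumer);
 *    if (producer_writes && !both_l3) {
 *       // ... emit a flush/invalidate before the consumer runs ...
 *    }
 */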

enum iris_mmap_mode {
   IRIS_MMAP_NONE, /**< Cannot be mapped */
   IRIS_MMAP_UC, /**< Fully uncached memory map */
   IRIS_MMAP_WC, /**< Write-combining map with no caching of reads */
   IRIS_MMAP_WB, /**< Write-back mapping with CPU caches enabled */
};

enum iris_heap {
   /**
    * System memory which is CPU-cached and (at least 1-way) coherent.
    *
    * This will use WB (write-back) CPU mappings.
    *
    * LLC systems and discrete cards (which enable snooping) will mostly use
    * this heap.  Non-LLC systems will only use it when explicit coherency is
    * required, as snooping is expensive there.
    */
   IRIS_HEAP_SYSTEM_MEMORY_CACHED_COHERENT,

   /**
    * System memory which is not CPU-cached.
    *
    * This will use WC (write-combining) CPU mappings, which have uncached
    * performance for reads.  This can be used for scanout on integrated
    * GPUs (which is never coherent with CPU caches).  It will be used for
    * most buffers on non-LLC platforms, where cache coherency is expensive.
    */
   IRIS_HEAP_SYSTEM_MEMORY_UNCACHED,

   /** IRIS_HEAP_SYSTEM_MEMORY_UNCACHED + compressed, only supported on Xe2 */
   IRIS_HEAP_SYSTEM_MEMORY_UNCACHED_COMPRESSED,

   /** Device-local memory (VRAM).  Cannot be placed in system memory! */
   IRIS_HEAP_DEVICE_LOCAL,
   IRIS_HEAP_MAX_NO_VRAM = IRIS_HEAP_DEVICE_LOCAL,

   /** Device-local compressed memory, only supported on Xe2 */
   IRIS_HEAP_DEVICE_LOCAL_COMPRESSED,

   /** Device-local memory that may be evicted to system memory if needed. */
   IRIS_HEAP_DEVICE_LOCAL_PREFERRED,

   /**
    * Device-local memory (VRAM) + a guarantee that it is CPU-visible.
    *
    * To be used for allocations that cannot be placed in system memory!
    * This will only be used on systems with a small PCIe BAR.
    */
   IRIS_HEAP_DEVICE_LOCAL_CPU_VISIBLE_SMALL_BAR,
   IRIS_HEAP_MAX_LARGE_BAR = IRIS_HEAP_DEVICE_LOCAL_CPU_VISIBLE_SMALL_BAR,

   IRIS_HEAP_MAX,
};

extern const char *iris_heap_to_string[];

static inline bool
iris_heap_is_device_local(enum iris_heap heap)
{
   return heap == IRIS_HEAP_DEVICE_LOCAL ||
          heap == IRIS_HEAP_DEVICE_LOCAL_PREFERRED ||
          heap == IRIS_HEAP_DEVICE_LOCAL_CPU_VISIBLE_SMALL_BAR ||
          heap == IRIS_HEAP_DEVICE_LOCAL_COMPRESSED;
}

static inline bool
iris_heap_is_compressed(enum iris_heap heap)
{
   return heap == IRIS_HEAP_SYSTEM_MEMORY_UNCACHED_COMPRESSED ||
          heap == IRIS_HEAP_DEVICE_LOCAL_COMPRESSED;
}
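
/* Illustrative note (annotation, not upstream code): these predicates are
 * independent axes, so e.g. a BO in IRIS_HEAP_DEVICE_LOCAL_COMPRESSED
 * satisfies both:
 *
 *    assert(iris_heap_is_device_local(IRIS_HEAP_DEVICE_LOCAL_COMPRESSED));
 *    assert(iris_heap_is_compressed(IRIS_HEAP_DEVICE_LOCAL_COMPRESSED));
 */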

#define IRIS_BATCH_COUNT 3

struct iris_bo_screen_deps {
   struct iris_syncobj *write_syncobjs[IRIS_BATCH_COUNT];
   struct iris_syncobj *read_syncobjs[IRIS_BATCH_COUNT];
};

struct iris_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /** Buffer manager context associated with this buffer object */
   struct iris_bufmgr *bufmgr;

   /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
   uint32_t hash;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Canonical virtual address of the buffer inside the PPGTT
    * (Per-Process Graphics Translation Table).
    *
    * Although each hardware context has its own VMA, we assign BOs to the
    * same address in all contexts, for simplicity.
    */
   uint64_t address;

   /**
    * If non-zero, then this bo has an aux-map translation to this address.
    */
   uint64_t aux_map_address;

   /**
    * If this BO is referenced by a batch, this _may_ be the index into the
    * batch->exec_bos[] list.
    *
    * Note that a single buffer may be used by multiple batches/contexts,
    * and thus appear in multiple lists, but we only track one index here.
    * In the common case one can guess that batch->exec_bos[bo->index] == bo
    * and double check if that's true to avoid a linear list walk.
    *
    * XXX: this is not ideal now that we have more than one batch per context,
    * XXX: as the index will flop back and forth between the render index and
    * XXX: compute index...
    */
   unsigned index;

   int refcount;
   const char *name;

   /** BO cache list */
   struct list_head head;

   /**
    * Synchronization sequence number of the most recent access of this BO
    * from each caching domain.
    *
    * Although this is a global field, use in multiple contexts should be
    * safe; see iris_emit_buffer_barrier_for() for details.
    *
    * Also aligned to 64 bits, which makes atomic operations faster on
    * 32-bit platforms.
    */
   alignas(8) uint64_t last_seqnos[NUM_IRIS_DOMAINS];

   /** Up to one per screen, may need realloc. */
   struct iris_bo_screen_deps *deps;
   int deps_size;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid for reusable buffers; non-reusable buffers are
    * those that have been shared with other processes, so we don't know
    * their state.
    */
   bool idle;

   /** Was this buffer zeroed at allocation time? */
   bool zeroed;

   union {
      struct {
         time_t free_time;

         /** Mapped address for the buffer, saved across map/unmap cycles */
         void *map;

         /** List of GEM handle exports of this buffer (bo_export) */
         struct list_head exports;

         /**
          * Kernel-assigned global name for this object
          *
          * List contains both flink named and prime fd'd objects
          */
         unsigned global_name;

         /** Prime fd used for shared buffers, -1 otherwise. */
         int prime_fd;

         /** The mmap coherency mode selected at BO allocation time */
         enum iris_mmap_mode mmap_mode;

         /** The heap selected at BO allocation time */
         enum iris_heap heap;

         /** Was this buffer imported from an external client? */
         bool imported;

         /** Has this buffer been exported to external clients? */
         bool exported;

         /** Boolean of whether this buffer can be re-used */
         bool reusable;

         /** Boolean of whether this buffer points into user memory */
         bool userptr;

         /** Boolean of whether this buffer is protected (HW encryption) */
         bool protected;

         /** Boolean of whether this buffer needs to be captured in the error
          * dump.  The Xe KMD requires this to be set before VM bind, while
          * i915 needs it set before batch_submit().
          */
         bool capture;

         /** Boolean of whether this buffer can be scanned out to a display */
         bool scanout;
      } real;
      struct {
         struct pb_slab_entry entry;
         struct iris_bo *real;
      } slab;
   };
};

/* No special attributes. */
#define BO_ALLOC_PLAIN           0
/* Content is set to 0; only done in the cache and slab code paths. */
#define BO_ALLOC_ZEROED          (1<<0)
/* Allocate a cached and coherent BO; this has a performance cost on
 * integrated platforms without LLC.
 * Should only be used for BOs that will be written and read by the CPU often.
 */
#define BO_ALLOC_CACHED_COHERENT (1<<1)
/* Place BO only in smem. */
#define BO_ALLOC_SMEM            (1<<2)
/* BO can be sent to display. */
#define BO_ALLOC_SCANOUT         (1<<3)
/* No sub-allocation (slabs). */
#define BO_ALLOC_NO_SUBALLOC     (1<<4)
/* Place BO only in lmem. */
#define BO_ALLOC_LMEM            (1<<5)
/* Content is protected, can't be mapped, and needs special handling. */
#define BO_ALLOC_PROTECTED       (1<<6)
/* BO can be exported to other applications. */
#define BO_ALLOC_SHARED          (1<<7)
/* BO will be captured in the KMD error dump. */
#define BO_ALLOC_CAPTURE         (1<<8)
/* Can be mapped. */
#define BO_ALLOC_CPU_VISIBLE     (1<<9)
/* BO content is compressed. */
#define BO_ALLOC_COMPRESSED      (1<<10)
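
/* Illustrative note (annotation, not upstream code): these are bit flags
 * and combine with OR.  For example, a zeroed, display-capable buffer
 * forced into device-local memory might be requested with:
 *
 *    unsigned flags = BO_ALLOC_ZEROED | BO_ALLOC_SCANOUT | BO_ALLOC_LMEM;
 */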

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture.  They must be mapped
 * using iris_bo_map() to be used by the CPU.
 */
struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
                              const char *name,
                              uint64_t size,
                              uint32_t alignment,
                              enum iris_memory_zone memzone,
                              unsigned flags);
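
/* Usage sketch (annotation, not upstream code; the "vertices" name and the
 * sizes are made up for illustration):
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "vertices", 64 * 1024, 4096,
 *                     IRIS_MEMZONE_OTHER, BO_ALLOC_PLAIN);
 *    if (!bo)
 *       // ... handle allocation failure ...
 */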

struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone);
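
/* Usage sketch (annotation, not upstream code): wrapping application memory
 * as a BO.  Assuming the usual GEM userptr rules apply, `ptr` and `size`
 * would need to be page-aligned, e.g.:
 *
 *    void *mem = NULL;
 *    if (posix_memalign(&mem, 4096, 4096) == 0) {
 *       struct iris_bo *bo =
 *          iris_bo_create_userptr(bufmgr, "wrapped", mem, 4096,
 *                                 IRIS_MEMZONE_OTHER);
 *       // ... mem must outlive bo ...
 *    }
 */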

/** Takes a reference on a buffer object */
static inline void
iris_bo_reference(struct iris_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void iris_bo_unreference(struct iris_bo *bo);

#define MAP_READ          PIPE_MAP_READ
#define MAP_WRITE         PIPE_MAP_WRITE
#define MAP_ASYNC         PIPE_MAP_UNSYNCHRONIZED
#define MAP_PERSISTENT    PIPE_MAP_PERSISTENT
#define MAP_COHERENT      PIPE_MAP_COHERENT
/* internal */
#define MAP_RAW           (PIPE_MAP_DRV_PRV << 0)
#define MAP_INTERNAL_MASK (MAP_RAW)

#define MAP_FLAGS         (MAP_READ | MAP_WRITE | MAP_ASYNC | \
                           MAP_PERSISTENT | MAP_COHERENT | MAP_INTERNAL_MASK)

/**
 * Maps the buffer into userspace.
 *
 * This function will first block waiting for any existing execution on the
 * buffer to complete.  The resulting mapping is returned.
 */
MUST_CHECK void *iris_bo_map(struct util_debug_callback *dbg,
                             struct iris_bo *bo, unsigned flags);
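
/* Usage sketch (annotation, not upstream code): mapping a BO for a CPU
 * write, assuming `dbg` may be NULL as in typical callers:
 *
 *    uint32_t *data = iris_bo_map(NULL, bo, MAP_WRITE);
 *    if (data) {
 *       data[0] = 0xdeadbeef;   // fill in contents
 *       iris_bo_unmap(bo);      // a no-op stub in this header, see below
 *    }
 */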

/**
 * Reduces the refcount on the userspace mapping of the buffer
 * object.
 */
static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }

/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc.  It is merely a way for the driver to implement
 * glFinish.
 */
void iris_bo_wait_rendering(struct iris_bo *bo);


/**
 * Unref a buffer manager instance.
 */
void iris_bufmgr_unref(struct iris_bufmgr *bufmgr);

/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param bo Buffer to create a name for
 * \param name Returned name
 */
int iris_bo_flink(struct iris_bo *bo, uint32_t *name);

/**
 * Returns true if the BO is backed by a real GEM object, false if it's
 * a wrapper that's suballocated from a larger BO.
 */
static inline bool
iris_bo_is_real(struct iris_bo *bo)
{
   return bo->gem_handle != 0;
}

/**
 * Unwrap any slab-allocated wrapper BOs to get the BO for the underlying
 * backing storage, which is a real BO associated with a GEM object.
 */
static inline struct iris_bo *
iris_get_backing_bo(struct iris_bo *bo)
{
   if (!iris_bo_is_real(bo))
      bo = bo->slab.real;

   /* We only allow one level of wrapping. */
   assert(iris_bo_is_real(bo));

   return bo;
}

/**
 * Is this buffer shared with external clients (imported or exported)?
 */
static inline bool
iris_bo_is_external(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.exported || bo->real.imported;
}

static inline bool
iris_bo_is_imported(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.imported;
}

static inline bool
iris_bo_is_exported(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.exported;
}

/**
 * True if the BO prefers to reside in device-local memory.
 *
 * We don't consider eviction here; this is meant to be a performance hint.
 * It will return true for BOs allocated from the LMEM or LMEM+SMEM heaps,
 * even if the buffer has been temporarily evicted to system memory.
 */
static inline bool
iris_bo_likely_local(const struct iris_bo *bo)
{
   if (!bo)
      return false;

   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return iris_heap_is_device_local(bo->real.heap);
}

static inline enum iris_mmap_mode
iris_bo_mmap_mode(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.mmap_mode;
}

/**
 * Mark a buffer as being shared with other external clients.
 */
void iris_bo_mark_exported(struct iris_bo *bo);

/**
 * Returns true if mapping the buffer for write could cause the process
 * to block, due to the object being active on the GPU.
 */
bool iris_bo_busy(struct iris_bo *bo);

struct iris_bufmgr *iris_bufmgr_get_for_fd(int fd, bool bo_reuse);
int iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr);

struct iris_bo *iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                                             const char *name,
                                             unsigned handle);

void *iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr);

int iris_gem_get_tiling(struct iris_bo *bo, uint32_t *tiling);
int iris_gem_set_tiling(struct iris_bo *bo, const struct isl_surf *surf);

int iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd);
struct iris_bo *iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd,
                                      const uint64_t modifier);
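
/* Usage sketch (annotation, not upstream code): sharing a buffer between
 * two devices via dma-buf, where `other_bufmgr` and `modifier` are
 * hypothetical and the modifier would come from format negotiation:
 *
 *    int fd = -1;
 *    if (iris_bo_export_dmabuf(bo, &fd) == 0) {
 *       struct iris_bo *imported =
 *          iris_bo_import_dmabuf(other_bufmgr, fd, modifier);
 *       close(fd);   // assuming the fd is no longer needed after import
 *    }
 */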

/**
 * Exports a bo as a GEM handle into a given DRM file descriptor
 * \param bo Buffer to export
 * \param drm_fd File descriptor where the new handle is created
 * \param out_handle Pointer to store the new handle
 *
 * Returns 0 if the buffer was successfully exported, a non-zero error code
 * otherwise.
 */
int iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
                                         uint32_t *out_handle);

/**
 * Returns the BO's address relative to the appropriate base address.
 *
 * All of our base addresses are programmed to the start of a 4GB region,
 * so simply returning the bottom 32 bits of the BO address will give us
 * the offset from whatever base address corresponds to that memory region.
 */
static inline uint32_t
iris_bo_offset_from_base_address(struct iris_bo *bo)
{
   /* This only works for buffers in the memory zones corresponding to a
    * base address - the top, unbounded memory zone doesn't have a base.
    */
   assert(bo->address < IRIS_MEMZONE_OTHER_START);
   return bo->address;
}
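
/* Worked example (annotation, not upstream code): a BO at address
 * 0x13F810000 lies in the surface zone, whose 4GB region starts at
 * IRIS_MEMZONE_BINDER_START = 0x100000000, so truncating to 32 bits
 * yields 0x3F810000 - exactly the offset from Surface State Base Address.
 */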

/**
 * Track access of a BO from the specified caching domain and sequence number.
 *
 * Can be used without locking.  Only the most recent access (i.e. highest
 * seqno) is tracked.
 */
static inline void
iris_bo_bump_seqno(struct iris_bo *bo, uint64_t seqno,
                   enum iris_domain type)
{
   uint64_t *const last_seqno = &bo->last_seqnos[type];
   uint64_t tmp, prev_seqno = p_atomic_read(last_seqno);

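   /* Lock-free monotonic update (descriptive comment added here): retry the
    * compare-and-swap until either our seqno is no longer newer than the
    * stored one, or the swap succeeds.  A failed p_atomic_cmpxchg returns
    * the value it observed, which becomes the next comparison baseline.
    */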
   while (prev_seqno < seqno &&
          prev_seqno != (tmp = p_atomic_cmpxchg(last_seqno, prev_seqno, seqno)))
      prev_seqno = tmp;
}

/**
 * Return the PAT entry for the given heap.
 */
const struct intel_device_info_pat_entry *
iris_heap_to_pat_entry(const struct intel_device_info *devinfo,
                       enum iris_heap heap, bool scanout);

enum iris_memory_zone iris_memzone_for_address(uint64_t address);

int iris_bufmgr_create_screen_id(struct iris_bufmgr *bufmgr);

simple_mtx_t *iris_bufmgr_get_bo_deps_lock(struct iris_bufmgr *bufmgr);

/**
 * A pool containing SAMPLER_BORDER_COLOR_STATE entries.
 *
 * See iris_border_color.c for more information.
 */
struct iris_border_color_pool {
   struct iris_bo *bo;
   void *map;
   unsigned insert_point;

   /** Map from border colors to offsets in the buffer. */
   struct hash_table *ht;

   /** Protects insert_point and the hash table. */
   simple_mtx_t lock;
};

struct iris_border_color_pool *iris_bufmgr_get_border_color_pool(
      struct iris_bufmgr *bufmgr);

/* iris_border_color.c */
void iris_init_border_color_pool(struct iris_bufmgr *bufmgr,
                                 struct iris_border_color_pool *pool);
void iris_destroy_border_color_pool(struct iris_border_color_pool *pool);
uint32_t iris_upload_border_color(struct iris_border_color_pool *pool,
                                  union pipe_color_union *color);
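
/* Usage sketch (annotation, not upstream code): uploading a border color
 * and using the returned pool offset, which fits in SAMPLER_STATE's 24-bit
 * "Indirect State Pointer" because the pool lives at the start of the
 * dynamic state zone:
 *
 *    union pipe_color_union black = { .f = { 0.0f, 0.0f, 0.0f, 1.0f } };
 *    uint32_t offset = iris_upload_border_color(pool, &black);
 *    // ... program offset into SAMPLER_STATE ...
 */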

uint64_t iris_bufmgr_vram_size(struct iris_bufmgr *bufmgr);
uint64_t iris_bufmgr_sram_size(struct iris_bufmgr *bufmgr);
const struct intel_device_info *iris_bufmgr_get_device_info(struct iris_bufmgr *bufmgr);
const struct iris_kmd_backend *
iris_bufmgr_get_kernel_driver_backend(struct iris_bufmgr *bufmgr);
uint32_t iris_bufmgr_get_global_vm_id(struct iris_bufmgr *bufmgr);
bool iris_bufmgr_use_global_vm_id(struct iris_bufmgr *bufmgr);
struct intel_bind_timeline *iris_bufmgr_get_bind_timeline(struct iris_bufmgr *bufmgr);
bool iris_bufmgr_compute_engine_supported(struct iris_bufmgr *bufmgr);
uint64_t iris_bufmgr_get_dummy_aux_address(struct iris_bufmgr *bufmgr);
struct iris_bo *iris_bufmgr_get_mem_fence_bo(struct iris_bufmgr *bufmgr);

enum iris_madvice {
   IRIS_MADVICE_WILL_NEED = 0,
   IRIS_MADVICE_DONT_NEED = 1,
};

void iris_bo_import_sync_state(struct iris_bo *bo, int sync_file_fd);
struct iris_syncobj *iris_bo_export_sync_state(struct iris_bo *bo);

#endif /* IRIS_BUFMGR_H */