• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2021 Alyssa Rosenzweig
3  * Copyright 2019 Collabora, Ltd.
4  * SPDX-License-Identifier: MIT
5  */
6 
7 #pragma once
8 
9 #include <stdbool.h>
10 #include <stddef.h>
11 #include <stdint.h>
12 #include <time.h>
13 #include "util/list.h"
14 
15 struct agx_device;
16 
/* Creation/behavior flags for a BO (buffer object). Stored in
 * agx_bo::flags and passed to agx_bo_create().
 */
enum agx_bo_flags {
   /* BO is shared across processes (imported or exported) and therefore cannot
    * be cached locally
    */
   AGX_BO_SHARED = 1 << 0,

   /* BO must be allocated in the low 32-bits of VA space */
   AGX_BO_LOW_VA = 1 << 1,

   /* BO is executable */
   AGX_BO_EXEC = 1 << 2,

   /* BO should be mapped write-back on the CPU (else, write combine) */
   AGX_BO_WRITEBACK = 1 << 3,

   /* BO could potentially be shared (imported or exported) and therefore cannot
    * be allocated as private
    */
   AGX_BO_SHAREABLE = 1 << 4,

   /* BO is read-only from the GPU side
    */
   AGX_BO_READONLY = 1 << 5,
};
41 
/* Constraints on a virtual address allocation (agx_va::flags). */
enum agx_va_flags {
   /* VA must be inside the USC region, otherwise unrestricted. */
   AGX_VA_USC = (1 << 0),

   /* VA must be fixed, otherwise allocated by the driver. */
   AGX_VA_FIXED = (1 << 1),
};
49 
/* A virtual address range reserved for a BO mapping. */
struct agx_va {
   /* Constraints the range was allocated under (see enum agx_va_flags) */
   enum agx_va_flags flags;

   /* Start of the range in the GPU VA space */
   uint64_t addr;

   /* Length of the range in bytes (_B suffix per the file's convention) */
   uint64_t size_B;
};
55 
/* A CPU/GPU address pair for a single mapping. */
struct agx_ptr {
   /* If CPU mapped, CPU address. NULL if not mapped */
   void *cpu;

   /* Mapped GPU address */
   uint64_t gpu;
};
63 
/* A GPU buffer object together with its cache/LRU bookkeeping, mapping,
 * and synchronization state. Reference-counted via agx_bo_reference /
 * agx_bo_unreference.
 */
struct agx_bo {
   /* Must be first for casting */
   struct list_head bucket_link;

   /* Used to link the BO to the BO cache LRU list. */
   struct list_head lru_link;

   /* Convenience */
   struct agx_device *dev;

   /* The time this BO was used last, so we can evict stale BOs. */
   time_t last_used;

   /* Creation attributes */
   enum agx_bo_flags flags;
   size_t size;  /* size in bytes, as requested at creation */
   size_t align; /* alignment requirement, as requested at creation */

   /* Mapping */
   struct agx_va *va;

   /* Suffixed to force agx_bo_map access */
   void *_map;

   /* Process-local index */
   uint32_t handle;

   /* DMA-BUF fd clone for adding fences to imports/exports */
   int prime_fd;

   /* Current writer, if any (queue in upper 32 bits, syncobj in lower 32
    * bits) -- pack/unpack with agx_bo_writer{,_queue,_syncobj}
    */
   uint64_t writer;

   /* Update atomically */
   int32_t refcnt;

   /* For debugging */
   const char *label;

   /* virtio blob_id */
   uint32_t blob_id;
   uint32_t vbo_res_id;
};
107 
/* Extract the syncobj handle from a packed writer word: it lives in the
 * low 32 bits (see agx_bo_writer). The cast makes the intentional
 * truncation explicit instead of relying on an implicit narrowing
 * conversion (which trips -Wconversion).
 */
static inline uint32_t
agx_bo_writer_syncobj(uint64_t writer)
{
   return (uint32_t)writer;
}
113 
/* Extract the queue handle from a packed writer word: it occupies the
 * upper 32 bits (see agx_bo_writer).
 */
static inline uint32_t
agx_bo_writer_queue(uint64_t writer)
{
   uint64_t upper_half = writer >> 32;
   return (uint32_t)upper_half;
}
119 
/* Pack a (queue, syncobj) pair into a single 64-bit writer word:
 * queue in the upper 32 bits, syncobj in the lower 32 bits. Inverse of
 * agx_bo_writer_queue / agx_bo_writer_syncobj.
 */
static inline uint64_t
agx_bo_writer(uint32_t queue, uint32_t syncobj)
{
   uint64_t packed = (uint64_t)queue << 32;
   packed |= syncobj;
   return packed;
}
125 
/* Allocate a new BO of at least @size bytes with alignment @align and the
 * given flags; @label is kept for debugging (see agx_bo::label).
 */
struct agx_bo *agx_bo_create(struct agx_device *dev, size_t size,
                             unsigned align, enum agx_bo_flags flags,
                             const char *label);

/* Take a reference on @bo (refcnt is documented as updated atomically). */
void agx_bo_reference(struct agx_bo *bo);

/* Drop a reference on @bo; presumably frees or returns it to the cache when
 * the count hits zero -- confirm against the implementation.
 */
void agx_bo_unreference(struct agx_device *dev, struct agx_bo *bo);

/* Import a BO from a DMA-BUF file descriptor @fd. */
struct agx_bo *agx_bo_import(struct agx_device *dev, int fd);

/* Export @bo; the int return is presumably a DMA-BUF fd (negative on
 * error by convention) -- confirm against the implementation.
 */
int agx_bo_export(struct agx_device *dev, struct agx_bo *bo);

/* Release @bo's resources outright, bypassing the cache. */
void agx_bo_free(struct agx_device *dev, struct agx_bo *bo);

/* Try to satisfy an allocation from the BO cache; returns NULL on miss.
 * @dontwait: if true, presumably skip BOs still busy on the GPU -- confirm
 * against the implementation.
 */
struct agx_bo *agx_bo_cache_fetch(struct agx_device *dev, size_t size,
                                  size_t align, uint32_t flags,
                                  const bool dontwait);

/* Evict every BO held in the cache (e.g. on teardown or memory pressure). */
void agx_bo_cache_evict_all(struct agx_device *dev);
140