• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #ifndef AMDGPU_VIRTIO_PROTO_H
2 #define AMDGPU_VIRTIO_PROTO_H
3 
4 #include <stdint.h>
5 #include "amdgpu.h"
6 #include "amdgpu_drm.h"
7 #ifdef __GNUC__
8 # pragma GCC diagnostic push
9 # pragma GCC diagnostic error "-Wpadded"
10 #endif
11 
12 enum amdgpu_ccmd {
13    AMDGPU_CCMD_QUERY_INFO = 1,
14    AMDGPU_CCMD_GEM_NEW,
15    AMDGPU_CCMD_BO_VA_OP,
16    AMDGPU_CCMD_CS_SUBMIT,
17    AMDGPU_CCMD_SET_METADATA,
18    AMDGPU_CCMD_BO_QUERY_INFO,
19    AMDGPU_CCMD_CREATE_CTX,
20    AMDGPU_CCMD_RESERVE_VMID,
21    AMDGPU_CCMD_SET_PSTATE,
22    AMDGPU_CCMD_CS_QUERY_FENCE_STATUS,
23 };
24 
25 struct amdgpu_ccmd_rsp {
26    struct vdrm_ccmd_rsp base;
27    int32_t ret;
28 };
29 static_assert(sizeof(struct amdgpu_ccmd_rsp) == 8, "bug");
30 
31 #define AMDGPU_STATIC_ASSERT_SIZE(t) \
32    static_assert(sizeof(struct t) % 8 == 0, "sizeof(struct " #t ") not multiple of 8"); \
33    static_assert(alignof(struct t) <= 8, "alignof(struct " #t ") too large");
34 
35 /**
36  * Defines the layout of shmem buffer used for host->guest communication.
37  */
38 struct amdvgpu_shmem {
39    struct vdrm_shmem base;
40 
41    /**
42     * Counter that is incremented on asynchronous errors, like SUBMIT
43     * or GEM_NEW failures.  The guest should treat errors as context-
44     * lost.
45     */
46    uint32_t async_error;
47 
48    uint32_t __pad;
49 
50    struct amdgpu_heap_info gtt;
51    struct amdgpu_heap_info vram;
52    struct amdgpu_heap_info vis_vram;
53 };
54 AMDGPU_STATIC_ASSERT_SIZE(amdvgpu_shmem)
55 DEFINE_CAST(vdrm_shmem, amdvgpu_shmem)
56 
57 
58 #define AMDGPU_CCMD(_cmd, _len) (struct vdrm_ccmd_req){ \
59        .cmd = AMDGPU_CCMD_##_cmd,                         \
60        .len = (_len),                                     \
61    }
62 
63 /*
64  * AMDGPU_CCMD_QUERY_INFO
65  *
66  * This is amdgpu_query_info.
67  */
68 struct amdgpu_ccmd_query_info_req {
69    struct vdrm_ccmd_req hdr;
70    struct drm_amdgpu_info info;
71 };
72 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_query_info_req)
73 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_query_info_req)
74 
75 struct amdgpu_ccmd_query_info_rsp {
76    struct amdgpu_ccmd_rsp hdr;
77    uint8_t payload[];
78 };
79 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_query_info_rsp)
80 
81 struct amdgpu_ccmd_gem_new_req {
82    struct vdrm_ccmd_req hdr;
83 
84    uint64_t blob_id;
85 
86    /* This is amdgpu_bo_alloc_request but padded correctly. */
87    struct {
88       uint64_t alloc_size;
89       uint64_t phys_alignment;
90       uint32_t preferred_heap;
91       uint32_t __pad;
92       uint64_t flags;
93    } r;
94 };
95 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_gem_new_req)
96 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_gem_new_req)
97 
98 
99 /*
100  * AMDGPU_CCMD_BO_VA_OP
101  *
102  */
103 struct amdgpu_ccmd_bo_va_op_req {
104    struct vdrm_ccmd_req hdr;
105    uint64_t va;
106    uint64_t vm_map_size;
107    uint64_t flags; /* Passed directly to kernel */
108    uint64_t flags2; /* AMDGPU_CCMD_BO_VA_OP_* */
109    uint64_t offset;
110    uint32_t res_id;
111    uint32_t op;
112 };
113 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_bo_va_op_req)
114 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_bo_va_op_req)
115 /* Specifies that this is a sparse BO. */
116 #define AMDGPU_CCMD_BO_VA_OP_SPARSE_BO (1 << 0)
117 
118 /*
119  * AMDGPU_CCMD_CS_SUBMIT
120  */
121 struct amdgpu_ccmd_cs_submit_req {
122    struct vdrm_ccmd_req hdr;
123 
124    uint32_t ctx_id;
125    uint32_t num_chunks; /* limited to AMDGPU_CCMD_CS_SUBMIT_MAX_NUM_CHUNKS */
126    uint32_t pad;
127    uint32_t ring_idx;
128 
129    /* Starts with a descriptor array:
130     *     (chunk_id, offset_in_payload), ...
131     */
132    uint8_t payload[];
133 };
134 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_cs_submit_req)
135 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_cs_submit_req)
136 #define AMDGPU_CCMD_CS_SUBMIT_MAX_NUM_CHUNKS 128
137 
138 /*
139  * AMDGPU_CCMD_SET_METADATA
140  */
141 struct amdgpu_ccmd_set_metadata_req {
142    struct vdrm_ccmd_req hdr;
143    uint64_t flags;
144    uint64_t tiling_info;
145    uint32_t res_id;
146    uint32_t size_metadata;
147    uint32_t umd_metadata[];
148 };
149 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_set_metadata_req)
150 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_set_metadata_req)
151 
152 
153 /*
154  * AMDGPU_CCMD_BO_QUERY_INFO
155  */
156 struct amdgpu_ccmd_bo_query_info_req {
157    struct vdrm_ccmd_req hdr;
158    uint32_t res_id;
159    uint32_t pad; /* must be zero */
160 };
161 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_bo_query_info_req)
162 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_bo_query_info_req)
163 
164 struct amdgpu_ccmd_bo_query_info_rsp {
165    struct amdgpu_ccmd_rsp hdr;
166    /* This is almost struct amdgpu_bo_info, but padded to get
167     * the same struct on 32 bit and 64 bit builds.
168     */
169    struct {
170       uint64_t                   alloc_size;           /*     0     8 */
171       uint64_t                   phys_alignment;       /*     8     8 */
172       uint32_t                   preferred_heap;       /*    16     4 */
173       uint32_t                   __pad;                /*    20     4 */
174       uint64_t                   alloc_flags;          /*    24     8 */
175       /* This is almost struct amdgpu_bo_metadata, but padded to get
176        * the same struct on 32 bit and 64 bit builds.
177        */
178       struct {
179          uint64_t                flags;                /*    32     8 */
180          uint64_t                tiling_info;          /*    40     8 */
181          uint32_t                size_metadata;        /*    48     4 */
182          uint32_t                umd_metadata[64];     /*    52   256 */
183          uint32_t                __pad;                /*    308    4 */
184       } metadata;
185    } info;
186 };
187 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_bo_query_info_rsp)
188 
189 /*
190  * AMDGPU_CCMD_CREATE_CTX
191  */
192 struct amdgpu_ccmd_create_ctx_req {
193    struct vdrm_ccmd_req hdr;
194    union {
195       int32_t priority; /* create */
196       uint32_t id;      /* destroy */
197    };
198    uint32_t flags; /* AMDGPU_CCMD_CREATE_CTX_* */
199 };
200 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_create_ctx_req)
201 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_create_ctx_req)
202 /* Destroy a context instead of creating one */
203 #define AMDGPU_CCMD_CREATE_CTX_DESTROY (1 << 0)
204 
205 struct amdgpu_ccmd_create_ctx_rsp {
206    struct amdgpu_ccmd_rsp hdr;
207    uint32_t ctx_id;
208    uint32_t pad;
209 };
210 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_create_ctx_rsp)
211 
212 /*
213  * AMDGPU_CCMD_RESERVE_VMID
214  */
215 struct amdgpu_ccmd_reserve_vmid_req {
216    struct vdrm_ccmd_req hdr;
217    uint64_t flags; /* AMDGPU_CCMD_RESERVE_VMID_* */
218 };
219 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_reserve_vmid_req)
220 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_reserve_vmid_req)
221 /* Unreserve a VMID instead of reserving one */
222 #define AMDGPU_CCMD_RESERVE_VMID_UNRESERVE (1 << 0)
223 
224 /*
225  * AMDGPU_CCMD_SET_PSTATE
226  */
227 struct amdgpu_ccmd_set_pstate_req {
228    struct vdrm_ccmd_req hdr;
229    uint32_t ctx_id;
230    uint32_t op;
231    uint32_t flags;
232    uint32_t pad;
233 };
234 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_set_pstate_req)
235 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_set_pstate_req)
236 
237 struct amdgpu_ccmd_set_pstate_rsp {
238    struct amdgpu_ccmd_rsp hdr;
239    uint32_t out_flags;
240    uint32_t pad;
241 };
242 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_set_pstate_rsp)
243 
244 /*
245  * AMDGPU_CCMD_CS_QUERY_FENCE_STATUS
246  */
247 struct amdgpu_ccmd_cs_query_fence_status_req {
248    struct vdrm_ccmd_req hdr;
249 
250    uint32_t ctx_id;
251 
252    uint32_t ip_type;
253    uint32_t ip_instance;
254    uint32_t ring;
255 
256    uint64_t fence;
257 
258    uint64_t timeout_ns;
259    uint64_t flags;
260 };
261 DEFINE_CAST(vdrm_ccmd_req, amdgpu_ccmd_cs_query_fence_status_req)
262 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_cs_query_fence_status_req)
263 
264 struct amdgpu_ccmd_cs_query_fence_status_rsp {
265    struct amdgpu_ccmd_rsp hdr;
266    uint32_t expired;
267    uint32_t pad;
268 };
269 AMDGPU_STATIC_ASSERT_SIZE(amdgpu_ccmd_cs_query_fence_status_rsp)
270 
271 #ifdef __GNUC__
272 # pragma GCC diagnostic pop
273 #endif
274 
275 #endif
276