• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2018 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "VirtioGpuStream.h"
18 #include "virtgpu_drm.h"
19 
20 #include <xf86drm.h>
21 
22 #include <sys/types.h>
23 #include <sys/mman.h>
24 
25 #include <errno.h>
26 #include <unistd.h>
27 
28 #ifndef PAGE_SIZE
29 #define PAGE_SIZE 0x1000
30 #endif
31 
32 // In a virtual machine, there should only be one GPU
33 #define RENDERNODE_MINOR 128
34 
35 // Maximum size of readback / response buffer in bytes
36 #define MAX_CMDRESPBUF_SIZE (10*PAGE_SIZE)
37 
38 // Attributes use to allocate our response buffer
39 // Similar to virgl's fence objects
40 #define PIPE_BUFFER             0
41 #define VIRGL_FORMAT_R8_UNORM   64
42 #define VIRGL_BIND_CUSTOM       (1 << 17)
43 
44 // Conservative; see virgl_winsys.h
45 #define VIRGL_MAX_CMDBUF_DWORDS (16*1024)
46 #define VIRGL_MAX_CMDBUF_SIZE   (4*VIRGL_MAX_CMDBUF_DWORDS)
47 
// Framing header prepended to every command travelling through the stream.
// The same layout is used for readback: the host writes a VirtioGpuCmd into
// the shared response buffer.
struct VirtioGpuCmd {
    uint32_t op;            // command opcode; op == 0 is the init dummy packet (see allocBuffer)
    uint32_t cmdSize;       // total size in bytes, including this 8-byte header
    unsigned char buf[0];   // variable-length payload (flexible array member)
} __attribute__((packed));
53 
// Packs the caller's pid and tid into a single 64-bit value so that both ids
// can be delivered to the host through the one-argument rcSetPuid() call.
union process_pipe_info {
    uint64_t proto;     // combined value actually sent over the wire
    struct {
       int pid;
       int tid;
      } id;
};
61 
processPipeInit(int stream_handle,HostConnectionType,renderControl_encoder_context_t * rcEnc)62 bool VirtioGpuProcessPipe::processPipeInit(int stream_handle, HostConnectionType, renderControl_encoder_context_t *rcEnc)
63 {
64   union process_pipe_info info;
65 
66   info.id.pid = getpid();
67   info.id.tid = gettid();
68   rcEnc->rcSetPuid(rcEnc, info.proto);
69   return true;
70 }
71 
// Construct an unconnected stream; connect() must be called before use.
// bufSize is the initial size hint for the guest-side command buffer.
VirtioGpuStream::VirtioGpuStream(size_t bufSize) :
    IOStream(0U),
    m_fd(-1),               // render node not yet opened
    m_bufSize(bufSize),
    m_buf(nullptr),         // command buffer, lazily allocated in allocBuffer()
    m_cmdResp_rh(0U),       // host resource handle of the response buffer
    m_cmdResp_bo(0U),       // GEM handle of the response buffer
    m_cmdResp(nullptr),     // guest mapping of the response buffer
    m_cmdRespPos(0U),       // read cursor within the current response
    m_cmdPos(0U),           // offset of the command currently being built
    m_flushPos(0U),         // offset of the next byte to be written
    m_allocSize(0U),        // bytes handed out by allocBuffer() for this command
    m_allocFlushSize(0U)    // portion of m_allocSize already committed
{
}
87 
// Tear down in reverse order of connect(): unmap the response buffer, then
// release its GEM handle, then close the render node.
VirtioGpuStream::~VirtioGpuStream()
{
    if (m_cmdResp) {
        munmap(m_cmdResp, MAX_CMDRESPBUF_SIZE);
    }

    if (m_cmdResp_bo > 0U) {
        drm_gem_close gem_close = {
            .handle = m_cmdResp_bo,
        };
        drmIoctl(m_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
    }

    if (m_fd >= 0) {
        close(m_fd);
    }

    free(m_buf);  // free(nullptr) is a no-op
}
107 
connect()108 int VirtioGpuStream::connect()
109 {
110     if (m_fd < 0) {
111         m_fd = drmOpenRender(RENDERNODE_MINOR);
112         if (m_fd < 0) {
113             ERR("%s: failed with fd %d (%s)", __func__, m_fd, strerror(errno));
114             return -1;
115         }
116     }
117 
118     if (!m_cmdResp_bo) {
119         drm_virtgpu_resource_create create = {
120             .target     = PIPE_BUFFER,
121             .format     = VIRGL_FORMAT_R8_UNORM,
122             .bind       = VIRGL_BIND_CUSTOM,
123             .width      = MAX_CMDRESPBUF_SIZE,
124             .height     = 1U,
125             .depth      = 1U,
126             .array_size = 0U,
127             .size       = MAX_CMDRESPBUF_SIZE,
128             .stride     = MAX_CMDRESPBUF_SIZE,
129         };
130         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &create);
131         if (ret) {
132             ERR("%s: failed with %d allocating command response buffer (%s)",
133                 __func__, ret, strerror(errno));
134             return -1;
135         }
136         m_cmdResp_bo = create.bo_handle;
137         if (!m_cmdResp_bo) {
138             ERR("%s: no handle when allocating command response buffer",
139                 __func__);
140             return -1;
141         }
142         m_cmdResp_rh = create.res_handle;
143         if (create.size != MAX_CMDRESPBUF_SIZE) {
144 	    ERR("%s: command response buffer wrongly sized, create.size=%zu "
145 		"!= %zu", __func__,
146 		static_cast<size_t>(create.size),
147 		static_cast<size_t>(MAX_CMDRESPBUF_SIZE));
148 	    abort();
149 	}
150     }
151 
152     if (!m_cmdResp) {
153         drm_virtgpu_map map;
154         memset(&map, 0, sizeof(map));
155         map.handle = m_cmdResp_bo;
156 
157         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_MAP, &map);
158         if (ret) {
159             ERR("%s: failed with %d mapping command response buffer (%s)",
160                 __func__, ret, strerror(errno));
161             return -1;
162         }
163         m_cmdResp = static_cast<VirtioGpuCmd *>(mmap64(nullptr,
164                                                        MAX_CMDRESPBUF_SIZE,
165                                                        PROT_READ, MAP_SHARED,
166                                                        m_fd, map.offset));
167         if (m_cmdResp == MAP_FAILED) {
168             ERR("%s: failed with %d mmap'ing command response buffer (%s)",
169                 __func__, ret, strerror(errno));
170             return -1;
171         }
172     }
173 
174     return 0;
175 }
176 
flush()177 int VirtioGpuStream::flush()
178 {
179     int ret = commitBuffer(m_allocSize - m_allocFlushSize);
180     if (ret)
181         return ret;
182     m_allocFlushSize = m_allocSize;
183     return 0;
184 }
185 
allocBuffer(size_t minSize)186 void *VirtioGpuStream::allocBuffer(size_t minSize)
187 {
188     if (m_buf) {
189         // Try to model the alloc() calls being made by the user. They should be
190         // obeying the protocol and using alloc() for anything they don't write
191         // with writeFully(), so we can know if this alloc() is for part of a
192         // command, or not. If it is not for part of a command, we are starting
193         // a new command, and should increment m_cmdPos.
194         VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);
195         if (m_allocSize + minSize > cmd->cmdSize) {
196             m_allocFlushSize = 0U;
197             m_allocSize = 0U;
198             // This might also be a convenient point to flush commands
199             if (m_cmdPos + cmd->cmdSize + minSize > m_bufSize) {
200                 if (commitAll() < 0) {
201                     ERR("%s: command flush failed", __func__);
202                     m_flushPos = 0U;
203                     m_bufSize = 0U;
204                     m_cmdPos = 0U;
205                     free(m_buf);
206                     m_buf = nullptr;
207                     return nullptr;
208                 }
209             } else {
210                 m_cmdPos += cmd->cmdSize;
211                 m_flushPos = m_cmdPos;
212             }
213         }
214     }
215 
216     // Update m_allocSize here, before minSize is tampered with below
217     m_allocSize += minSize;
218 
219     // Make sure anything we already have written to the buffer is retained
220     minSize += m_flushPos;
221 
222     size_t allocSize = (m_bufSize < minSize ? minSize : m_bufSize);
223     if (!m_buf) {
224         m_buf = static_cast<unsigned char *>(malloc(allocSize));
225     } else if (m_bufSize < allocSize) {
226         unsigned char *p = static_cast<unsigned char *>(realloc(m_buf, allocSize));
227         if (!p) {
228             free(m_buf);
229         }
230         m_buf = p;
231     }
232     if (!m_buf) {
233         ERR("%s: alloc (%zu) failed\n", __func__, allocSize);
234         m_allocFlushSize = 0U;
235         m_allocSize = 0U;
236         m_flushPos = 0U;
237         m_bufSize = 0U;
238         m_cmdPos = 0U;
239     } else {
240         m_bufSize = allocSize;
241     }
242     if (m_flushPos == 0 && m_cmdPos == 0) {
243       // During initialization, HostConnection will send an empty command
244       // packet to check the connection is good, but it doesn't obey the usual
245       // line protocol. This is a 4 byte write to [0], which is our 'op' field,
246       // and we don't have an op=0 so it's OK. We fake up a valid length, and
247       // overload this workaround by putting the res_handle for the readback
248       // buffer in the command payload, patched in just before we submit.
249       VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);
250       cmd->op = 0U;
251       cmd->cmdSize = sizeof(*cmd) + sizeof(__u32);
252     }
253     return m_buf + m_cmdPos;
254 }
255 
256 // For us, writeFully() means to write a command without any header, directly
257 // into the buffer stream. We can use the packet frame written directly to the
258 // stream to verify this write is within bounds, then update the counter.
259 
// Append len raw payload bytes at the current flush position and commit them.
// The write must stay within the command frame started via allocBuffer();
// protocol violations are treated as fatal (abort), since a corrupted pipe
// is lethal for the emulator. Returns 0 on success, -1 if the stream is not
// connected.
int VirtioGpuStream::writeFully(const void *buf, size_t len)
{
    if (!valid())
        return -1;

    if (!buf) {
        if (len > 0) {
            // If len is non-zero, buf must not be NULL. Otherwise the pipe would
            // be in a corrupted state, which is lethal for the emulator.
            ERR("%s: failed, buf=NULL, len %zu, lethal error, exiting",
                __func__, len);
            abort();
        }
        return 0;
    }

    // Frame of the command currently being assembled
    VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);

    // The flush cursor must already be past the header (written via alloc())
    if (m_flushPos < sizeof(*cmd)) {
        ERR("%s: writeFully len %zu would overwrite command header, "
            "cmd_pos=%zu, flush_pos=%zu, lethal error, exiting", __func__,
            len, m_cmdPos, m_flushPos);
        abort();
    }

    // The payload must fit within the command's declared size
    if (m_flushPos + len > cmd->cmdSize) {
        ERR("%s: writeFully len %zu would overflow the command bounds, "
            "cmd_pos=%zu, flush_pos=%zu, cmdsize=%" PRIu32 ", lethal error, exiting",
            __func__, len, m_cmdPos, m_flushPos, cmd->cmdSize);
        abort();
    }

    // Warn but proceed; the host may still reject an oversized command
    if (len > VIRGL_MAX_CMDBUF_SIZE) {
        ERR("%s: Large command (%zu bytes) exceeds virgl limits",
            __func__, len);
        /* Fall through */
    }

    memcpy(&m_buf[m_flushPos], buf, len);
    // NOTE(review): commitBuffer()'s return value is ignored here — the
    // bounds checks above presumably make failure impossible; confirm that
    // cmd->cmdSize never exceeds m_bufSize - m_cmdPos.
    commitBuffer(len);
    m_allocSize += len;
    return 0;
}
303 
// Read len bytes of the host's response into buf. Flushes any outstanding
// commands first, waits for the virtqueue to drain on the first read of a
// response, then copies out of the shared response buffer. Returns buf on
// success, nullptr on failure; protocol violations abort.
const unsigned char *VirtioGpuStream::readFully(void *buf, size_t len)
{
    if (!valid())
        return nullptr;

    if (!buf) {
        if (len > 0) {
            // If len is non-zero, buf must not be NULL. Otherwise the pipe would
            // be in a corrupted state, which is lethal for the emulator.
            ERR("%s: failed, buf=NULL, len %zu, lethal error, exiting.",
                __func__, len);
            abort();
        }
        return nullptr;
    }

    // Read is too big for current architecture
    if (len > MAX_CMDRESPBUF_SIZE - sizeof(*m_cmdResp)) {
        ERR("%s: failed, read too large, len %zu, lethal error, exiting.",
            __func__, len);
        abort();
    }

    // Commit all outstanding write commands (if any)
    if (commitAll() < 0) {
        ERR("%s: command flush failed", __func__);
        return nullptr;
    }

    if (len > 0U && m_cmdRespPos == 0U) {
        // When we are about to read for the first time, wait for the virtqueue
        // to drain to this command, otherwise the data could be stale
        drm_virtgpu_3d_wait wait = {
            .handle = m_cmdResp_bo,
        };
        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);
        if (ret) {
            ERR("%s: failed with %d waiting for response buffer (%s)",
                __func__, ret, strerror(errno));
            // Fall through, hope for the best
        }
    }

    // Most likely a protocol implementation error
    if (m_cmdResp->cmdSize - sizeof(*m_cmdResp) < m_cmdRespPos + len) {
        ERR("%s: failed, op %" PRIu32 ", len %zu, cmdSize %" PRIu32 ", pos %zu, lethal "
            "error, exiting.", __func__, m_cmdResp->op, len,
            m_cmdResp->cmdSize, m_cmdRespPos);
        abort();
    }

    memcpy(buf, &m_cmdResp->buf[m_cmdRespPos], len);

    // Advance the read cursor; reset it once the whole response is consumed
    if (m_cmdRespPos + len == m_cmdResp->cmdSize - sizeof(*m_cmdResp)) {
        m_cmdRespPos = 0U;
    } else {
        m_cmdRespPos += len;
    }

    return reinterpret_cast<const unsigned char *>(buf);
}
365 
commitBuffer(size_t size)366 int VirtioGpuStream::commitBuffer(size_t size)
367 {
368     if (m_flushPos + size > m_bufSize) {
369         ERR("%s: illegal commit size %zu, flushPos %zu, bufSize %zu",
370             __func__, size, m_flushPos, m_bufSize);
371         return -1;
372     }
373     m_flushPos += size;
374     return 0;
375 }
376 
// Submit every complete command between the start of the buffer and the
// flush cursor to the host, one EXECBUFFER ioctl per command, then reset the
// command and flush cursors. Returns 0 on success, -1 on ioctl failure
// (already-submitted commands stay submitted).
int VirtioGpuStream::commitAll()
{
    size_t pos = 0U, numFlushed = 0U;
    while (pos < m_flushPos) {
        VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[pos]);

        // Should never happen
        if (pos + cmd->cmdSize > m_bufSize) {
            ERR("%s: failed, pos %zu, cmdSize %" PRIu32 ", bufSize %zu, lethal "
                "error, exiting.", __func__, pos, cmd->cmdSize, m_bufSize);
            abort();
        }

        // Saw dummy command; patch it with res handle
        // (op == 0 is the init packet faked up by allocBuffer; its payload
        // carries the response buffer's resource handle)
        if (cmd->op == 0) {
            *(uint32_t *)cmd->buf = m_cmdResp_rh;
        }

        // Flush a single command, attaching the response buffer so the host
        // can write the command's reply into it
        drm_virtgpu_execbuffer execbuffer = {
            .size           = cmd->cmdSize,
            .command        = reinterpret_cast<__u64>(cmd),
            .bo_handles     = reinterpret_cast<__u64>(&m_cmdResp_bo),
            .num_bo_handles = 1U,
        };
        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
        if (ret) {
            ERR("%s: failed with %d executing command buffer (%s)",  __func__,
                ret, strerror(errno));
            return -1;
        }

        pos += cmd->cmdSize;
        numFlushed++;
    }

    // Diagnostic only: the last cmdSize stepped past the flush cursor,
    // meaning the final command was not fully written yet
    if (pos > m_flushPos) {
        ERR("%s: aliasing, flushPos %zu, pos %zu, probably ok", __func__,
            m_flushPos, pos);
        /* Fall through */
    }

    m_flushPos = 0U;
    m_cmdPos = 0U;
    return 0;
}
423