• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2018 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "VirtioGpuStream.h"
18 
19 #include <cros_gralloc_handle.h>
20 #include <drm/virtgpu_drm.h>
21 #include <xf86drm.h>
22 
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 
26 #include <errno.h>
27 #include <unistd.h>
28 
29 #ifndef PAGE_SIZE
30 #define PAGE_SIZE 0x1000
31 #endif
32 
33 // In a virtual machine, there should only be one GPU
34 #define RENDERNODE_MINOR 128
35 
36 // Maximum size of readback / response buffer in bytes
37 #define MAX_CMDRESPBUF_SIZE (10*PAGE_SIZE)
38 
39 // Attributes use to allocate our response buffer
40 // Similar to virgl's fence objects
41 #define PIPE_BUFFER             0
42 #define VIRGL_FORMAT_R8_UNORM   64
43 #define VIRGL_BIND_CUSTOM       (1 << 17)
44 
45 // Conservative; see virgl_winsys.h
46 #define VIRGL_MAX_CMDBUF_DWORDS (16*1024)
47 #define VIRGL_MAX_CMDBUF_SIZE   (4*VIRGL_MAX_CMDBUF_DWORDS)
48 
// Wire format of one command in the stream: a packed 8-byte header followed
// by a variable-length payload. cmdSize is the total size in bytes including
// the header. op == 0 is used by the init-time dummy packet (see
// allocBuffer() / commitAll(), which patch the response res_handle into it).
struct VirtioGpuCmd {
    uint32_t op;           // operation code; 0 reserved for the dummy packet
    uint32_t cmdSize;      // total bytes, header + payload
    unsigned char buf[0];  // payload; GNU zero-length trailing array idiom
} __attribute__((packed));
54 
getHostHandle(native_handle_t const * handle_)55 uint32_t CrosGralloc::getHostHandle(native_handle_t const* handle_)
56 {
57     uint32_t id = 0;
58 
59     if (m_fd >= 0) {
60         cros_gralloc_handle const* handle =
61           reinterpret_cast<cros_gralloc_handle const*>(handle_);
62         drmPrimeFDToHandle(m_fd, handle->fds[0], &id);
63     }
64 
65     return id;
66 }
67 
getFormat(native_handle_t const * handle)68 int CrosGralloc::getFormat(native_handle_t const* handle)
69 {
70     return ((cros_gralloc_handle *)handle)->droid_format;
71 }
72 
processPipeInit(renderControl_encoder_context_t * rcEnc)73 bool VirtioGpuProcessPipe::processPipeInit(renderControl_encoder_context_t *rcEnc)
74 {
75   union {
76       uint64_t proto;
77       struct {
78           int pid;
79           int tid;
80       } id;
81   } puid = {
82       .id.pid = getpid(),
83       .id.tid = gettid(),
84   };
85   rcEnc->rcSetPuid(rcEnc, puid.proto);
86   return true;
87 }
88 
// Construct an unconnected stream; connect() must be called before use.
// bufSize is the initial size hint for the guest-side command buffer
// (m_buf), which allocBuffer() grows on demand.
VirtioGpuStream::VirtioGpuStream(size_t bufSize) :
    IOStream(0U),
    m_fd(-1),             // DRM render node fd; opened lazily in connect()
    m_bufSize(bufSize),
    m_buf(nullptr),       // command buffer; allocated by allocBuffer()
    m_cmdResp_rh(0U),     // host resource handle of the response buffer
    m_cmdResp_bo(0U),     // GEM bo handle of the response buffer
    m_cmdResp(nullptr),   // mmap'd view of the response buffer
    m_cmdRespPos(0U),     // read cursor within the current response
    m_cmdPos(0U),         // offset of the command currently being built
    m_flushPos(0U),       // offset of the first unwritten byte
    m_allocSize(0U),      // bytes alloc'd by the user for the current command
    m_allocFlushSize(0U)  // portion of m_allocSize already flushed
{
}
104 
// Tear down in reverse order of connect(): unmap the response buffer,
// close its GEM handle, close the render node, then free the guest-side
// command buffer.
VirtioGpuStream::~VirtioGpuStream()
{
    if (m_cmdResp) {
        munmap(m_cmdResp, MAX_CMDRESPBUF_SIZE);
    }

    if (m_cmdResp_bo > 0U) {
        // NOTE(review): assumes m_fd is still open whenever a bo handle
        // exists — true today because the bo is only created in connect()
        // after the fd has been opened, and never outlives it.
        drm_gem_close gem_close = {
            .handle = m_cmdResp_bo,
        };
        drmIoctl(m_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
    }

    if (m_fd >= 0) {
        close(m_fd);
    }

    // free(nullptr) is a no-op, so this is safe even if never allocated.
    free(m_buf);
}
124 
connect()125 int VirtioGpuStream::connect()
126 {
127     if (m_fd < 0) {
128         m_fd = drmOpenRender(RENDERNODE_MINOR);
129         if (m_fd < 0) {
130             ERR("%s: failed with fd %d (%s)", __func__, m_fd, strerror(errno));
131             return -1;
132         }
133     }
134 
135     if (!m_cmdResp_bo) {
136         drm_virtgpu_resource_create create = {
137             .target     = PIPE_BUFFER,
138             .format     = VIRGL_FORMAT_R8_UNORM,
139             .bind       = VIRGL_BIND_CUSTOM,
140             .width      = MAX_CMDRESPBUF_SIZE,
141             .height     = 1U,
142             .depth      = 1U,
143             .array_size = 0U,
144             .size       = MAX_CMDRESPBUF_SIZE,
145             .stride     = MAX_CMDRESPBUF_SIZE,
146         };
147         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &create);
148         if (ret) {
149             ERR("%s: failed with %d allocating command response buffer (%s)",
150                 __func__, ret, strerror(errno));
151             return -1;
152         }
153         m_cmdResp_bo = create.bo_handle;
154         if (!m_cmdResp_bo) {
155             ERR("%s: no handle when allocating command response buffer",
156                 __func__);
157             return -1;
158         }
159         m_cmdResp_rh = create.res_handle;
160         if (create.size != MAX_CMDRESPBUF_SIZE) {
161 	    ERR("%s: command response buffer wrongly sized, create.size=%zu "
162 		"!= %zu", __func__,
163 		static_cast<size_t>(create.size),
164 		static_cast<size_t>(MAX_CMDRESPBUF_SIZE));
165 	    abort();
166 	}
167     }
168 
169     if (!m_cmdResp) {
170         drm_virtgpu_map map = {
171             .handle = m_cmdResp_bo,
172         };
173         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_MAP, &map);
174         if (ret) {
175             ERR("%s: failed with %d mapping command response buffer (%s)",
176                 __func__, ret, strerror(errno));
177             return -1;
178         }
179         m_cmdResp = static_cast<VirtioGpuCmd *>(mmap64(nullptr,
180                                                        MAX_CMDRESPBUF_SIZE,
181                                                        PROT_READ, MAP_SHARED,
182                                                        m_fd, map.offset));
183         if (m_cmdResp == MAP_FAILED) {
184             ERR("%s: failed with %d mmap'ing command response buffer (%s)",
185                 __func__, ret, strerror(errno));
186             return -1;
187         }
188     }
189 
190     m_gralloc.setFd(m_fd);
191     return 0;
192 }
193 
flush()194 int VirtioGpuStream::flush()
195 {
196     int ret = commitBuffer(m_allocSize - m_allocFlushSize);
197     if (ret)
198         return ret;
199     m_allocFlushSize = m_allocSize;
200     return 0;
201 }
202 
// Hand out minSize bytes of command-buffer space at the current command
// position, growing (or creating) m_buf as needed. Tracks per-command
// allocation totals to detect when the caller starts a new command, and
// fakes up a valid dummy packet for HostConnection's init-time probe.
// Returns a pointer into m_buf, or nullptr on allocation/flush failure.
void *VirtioGpuStream::allocBuffer(size_t minSize)
{
    if (m_buf) {
        // Try to model the alloc() calls being made by the user. They should be
        // obeying the protocol and using alloc() for anything they don't write
        // with writeFully(), so we can know if this alloc() is for part of a
        // command, or not. If it is not for part of a command, we are starting
        // a new command, and should increment m_cmdPos.
        VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);
        if (m_allocSize + minSize > cmd->cmdSize) {
            m_allocFlushSize = 0U;
            m_allocSize = 0U;
            // This might also be a convenient point to flush commands
            if (m_cmdPos + cmd->cmdSize + minSize > m_bufSize) {
                if (commitAll() < 0) {
                    ERR("%s: command flush failed", __func__);
                    // Flush failed: drop all buffered state and report OOM-like
                    // failure to the caller.
                    m_flushPos = 0U;
                    m_bufSize = 0U;
                    m_cmdPos = 0U;
                    free(m_buf);
                    m_buf = nullptr;
                    return nullptr;
                }
            } else {
                // Advance past the completed command; the next writes start
                // a fresh packet at the new m_cmdPos.
                m_cmdPos += cmd->cmdSize;
                m_flushPos = m_cmdPos;
            }
        }
    }

    // Update m_allocSize here, before minSize is tampered with below
    m_allocSize += minSize;

    // Make sure anything we already have written to the buffer is retained
    minSize += m_flushPos;

    size_t allocSize = (m_bufSize < minSize ? minSize : m_bufSize);
    if (!m_buf) {
        m_buf = static_cast<unsigned char *>(malloc(allocSize));
    } else if (m_bufSize < allocSize) {
        unsigned char *p = static_cast<unsigned char *>(realloc(m_buf, allocSize));
        if (!p) {
            // realloc failed; free the old buffer so the !m_buf branch below
            // resets all stream state.
            free(m_buf);
        }
        m_buf = p;
    }
    if (!m_buf) {
        ERR("%s: alloc (%zu) failed\n", __func__, allocSize);
        m_allocFlushSize = 0U;
        m_allocSize = 0U;
        m_flushPos = 0U;
        m_bufSize = 0U;
        m_cmdPos = 0U;
    } else {
        m_bufSize = allocSize;
    }
    if (m_flushPos == 0 && m_cmdPos == 0) {
      // During initialization, HostConnection will send an empty command
      // packet to check the connection is good, but it doesn't obey the usual
      // line protocol. This is a 4 byte write to [0], which is our 'op' field,
      // and we don't have an op=0 so it's OK. We fake up a valid length, and
      // overload this workaround by putting the res_handle for the readback
      // buffer in the command payload, patched in just before we submit.
      VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);
      cmd->op = 0U;
      cmd->cmdSize = sizeof(*cmd) + sizeof(__u32);
    }
    return m_buf + m_cmdPos;
}
272 
273 // For us, writeFully() means to write a command without any header, directly
274 // into the buffer stream. We can use the packet frame written directly to the
275 // stream to verify this write is within bounds, then update the counter.
276 
writeFully(const void * buf,size_t len)277 int VirtioGpuStream::writeFully(const void *buf, size_t len)
278 {
279     if (!valid())
280         return -1;
281 
282     if (!buf) {
283         if (len > 0) {
284             // If len is non-zero, buf must not be NULL. Otherwise the pipe would
285             // be in a corrupted state, which is lethal for the emulator.
286             ERR("%s: failed, buf=NULL, len %zu, lethal error, exiting",
287                 __func__, len);
288             abort();
289         }
290         return 0;
291     }
292 
293     VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);
294 
295     if (m_flushPos < sizeof(*cmd)) {
296         ERR("%s: writeFully len %zu would overwrite command header, "
297             "cmd_pos=%zu, flush_pos=%zu, lethal error, exiting", __func__,
298             len, m_cmdPos, m_flushPos);
299         abort();
300     }
301 
302     if (m_flushPos + len > cmd->cmdSize) {
303         ERR("%s: writeFully len %zu would overflow the command bounds, "
304             "cmd_pos=%zu, flush_pos=%zu, cmdsize=%zu, lethal error, exiting",
305             __func__, len, m_cmdPos, m_flushPos, cmd->cmdSize);
306         abort();
307     }
308 
309     if (len > VIRGL_MAX_CMDBUF_SIZE) {
310         ERR("%s: Large command (%zu bytes) exceeds virgl limits",
311             __func__, len);
312         /* Fall through */
313     }
314 
315     memcpy(&m_buf[m_flushPos], buf, len);
316     commitBuffer(len);
317     m_allocSize += len;
318     return 0;
319 }
320 
readFully(void * buf,size_t len)321 const unsigned char *VirtioGpuStream::readFully(void *buf, size_t len)
322 {
323     if (!valid())
324         return nullptr;
325 
326     if (!buf) {
327         if (len > 0) {
328             // If len is non-zero, buf must not be NULL. Otherwise the pipe would
329             // be in a corrupted state, which is lethal for the emulator.
330             ERR("%s: failed, buf=NULL, len %zu, lethal error, exiting.",
331                 __func__, len);
332             abort();
333         }
334         return nullptr;
335     }
336 
337     // Read is too big for current architecture
338     if (len > MAX_CMDRESPBUF_SIZE - sizeof(*m_cmdResp)) {
339         ERR("%s: failed, read too large, len %zu, lethal error, exiting.",
340             __func__, len);
341         abort();
342     }
343 
344     // Commit all outstanding write commands (if any)
345     if (commitAll() < 0) {
346         ERR("%s: command flush failed", __func__);
347         return nullptr;
348     }
349 
350     if (len > 0U && m_cmdRespPos == 0U) {
351         // When we are about to read for the first time, wait for the virtqueue
352         // to drain to this command, otherwise the data could be stale
353         drm_virtgpu_3d_wait wait = {
354             .handle = m_cmdResp_bo,
355         };
356         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);
357         if (ret) {
358             ERR("%s: failed with %d waiting for response buffer (%s)",
359                 __func__, ret, strerror(errno));
360             // Fall through, hope for the best
361         }
362     }
363 
364     // Most likely a protocol implementation error
365     if (m_cmdResp->cmdSize - sizeof(*m_cmdResp) < m_cmdRespPos + len) {
366         ERR("%s: failed, op %zu, len %zu, cmdSize %zu, pos %zu, lethal "
367             "error, exiting.", __func__, m_cmdResp->op, len,
368             m_cmdResp->cmdSize, m_cmdRespPos);
369         abort();
370     }
371 
372     memcpy(buf, &m_cmdResp->buf[m_cmdRespPos], len);
373 
374     if (m_cmdRespPos + len == m_cmdResp->cmdSize - sizeof(*m_cmdResp)) {
375         m_cmdRespPos = 0U;
376     } else {
377         m_cmdRespPos += len;
378     }
379 
380     return reinterpret_cast<const unsigned char *>(buf);
381 }
382 
commitBuffer(size_t size)383 int VirtioGpuStream::commitBuffer(size_t size)
384 {
385     if (m_flushPos + size > m_bufSize) {
386         ERR("%s: illegal commit size %zu, flushPos %zu, bufSize %zu",
387             __func__, size, m_flushPos, m_bufSize);
388         return -1;
389     }
390     m_flushPos += size;
391     return 0;
392 }
393 
commitAll()394 int VirtioGpuStream::commitAll()
395 {
396     size_t pos = 0U, numFlushed = 0U;
397     while (pos < m_flushPos) {
398         VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[pos]);
399 
400         // Should never happen
401         if (pos + cmd->cmdSize > m_bufSize) {
402             ERR("%s: failed, pos %zu, cmdSize %zu, bufSize %zu, lethal "
403                 "error, exiting.", __func__, pos, cmd->cmdSize, m_bufSize);
404             abort();
405         }
406 
407         // Saw dummy command; patch it with res handle
408         if (cmd->op == 0) {
409             *(uint32_t *)cmd->buf = m_cmdResp_rh;
410         }
411 
412         // Flush a single command
413         drm_virtgpu_execbuffer execbuffer = {
414             .size           = cmd->cmdSize,
415             .command        = reinterpret_cast<__u64>(cmd),
416             .bo_handles     = reinterpret_cast<__u64>(&m_cmdResp_bo),
417             .num_bo_handles = 1U,
418         };
419         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
420         if (ret) {
421             ERR("%s: failed with %d executing command buffer (%s)",  __func__,
422                 ret, strerror(errno));
423             return -1;
424         }
425 
426         pos += cmd->cmdSize;
427         numFlushed++;
428     }
429 
430     if (pos > m_flushPos) {
431         ERR("%s: aliasing, flushPos %zu, pos %zu, probably ok", __func__,
432             m_flushPos, pos);
433         /* Fall through */
434     }
435 
436     m_flushPos = 0U;
437     m_cmdPos = 0U;
438     return 0;
439 }
440