/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17 #include "VirtioGpuStream.h"
18 
19 #include <cros_gralloc_handle.h>
20 #include <drm/virtgpu_drm.h>
21 #include <xf86drm.h>
22 
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 
26 #include <errno.h>
27 #include <unistd.h>
28 
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif

// In a virtual machine, there should only be one GPU
#define RENDERNODE_MINOR 128

// Maximum size of readback / response buffer in bytes
#define MAX_CMDRESPBUF_SIZE (10*PAGE_SIZE)

// Attributes used to allocate our response buffer
// Similar to virgl's fence objects
#define PIPE_BUFFER             0
#define VIRGL_FORMAT_R8_UNORM   64
#define VIRGL_BIND_CUSTOM       (1 << 17)

// Conservative; see virgl_winsys.h
#define VIRGL_MAX_CMDBUF_DWORDS (16*1024)
#define VIRGL_MAX_CMDBUF_SIZE   (4*VIRGL_MAX_CMDBUF_DWORDS)

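// Header framing every command placed in the stream buffer: the opcode, the
// total command size in bytes (header included), followed by the encoded
// payload.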
struct VirtioGpuCmd {
    uint32_t op;
    uint32_t cmdSize;
    unsigned char buf[0];
} __attribute__((packed));

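// Identify this process to the host by packing the pid/tid pair into a single
// 64-bit "process unique id" and sending it through the renderControl encoder.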
bool VirtioGpuProcessPipe::processPipeInit(HostConnectionType, renderControl_encoder_context_t *rcEnc)
{
    union {
        uint64_t proto;
        struct {
            int pid;
            int tid;
        } id;
    } puid = {
        .id.pid = getpid(),
        .id.tid = gettid(),
    };
    rcEnc->rcSetPuid(rcEnc, puid.proto);
    return true;
}

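// All stream state starts out empty; the DRM fd and the command response
// buffer are set up lazily by connect().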
VirtioGpuStream::VirtioGpuStream(size_t bufSize) :
    IOStream(0U),
    m_fd(-1),
    m_bufSize(bufSize),
    m_buf(nullptr),
    m_cmdResp_rh(0U),
    m_cmdResp_bo(0U),
    m_cmdResp(nullptr),
    m_cmdRespPos(0U),
    m_cmdPos(0U),
    m_flushPos(0U),
    m_allocSize(0U),
    m_allocFlushSize(0U)
{
}

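// Tear down in reverse order of setup: unmap the response buffer, release its
// GEM handle, close the DRM fd, and free the local command buffer.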
VirtioGpuStream::~VirtioGpuStream()
{
    if (m_cmdResp) {
        munmap(m_cmdResp, MAX_CMDRESPBUF_SIZE);
    }

    if (m_cmdResp_bo > 0U) {
        drm_gem_close gem_close = {
            .handle = m_cmdResp_bo,
        };
        drmIoctl(m_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
    }

    if (m_fd >= 0) {
        close(m_fd);
    }

    free(m_buf);
}

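// Lazily bring up the stream: open the virtio-gpu render node, allocate a
// buffer object to receive command responses, and map it read-only into this
// process. Each step is skipped if it has already been done, so connect()
// can safely be called more than once.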
int VirtioGpuStream::connect()
{
    if (m_fd < 0) {
        m_fd = drmOpenRender(RENDERNODE_MINOR);
        if (m_fd < 0) {
            ERR("%s: failed with fd %d (%s)", __func__, m_fd, strerror(errno));
            return -1;
        }
    }

    if (!m_cmdResp_bo) {
        drm_virtgpu_resource_create create = {
            .target     = PIPE_BUFFER,
            .format     = VIRGL_FORMAT_R8_UNORM,
            .bind       = VIRGL_BIND_CUSTOM,
            .width      = MAX_CMDRESPBUF_SIZE,
            .height     = 1U,
            .depth      = 1U,
            .array_size = 0U,
            .size       = MAX_CMDRESPBUF_SIZE,
            .stride     = MAX_CMDRESPBUF_SIZE,
        };
        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &create);
        if (ret) {
            ERR("%s: failed with %d allocating command response buffer (%s)",
                __func__, ret, strerror(errno));
            return -1;
        }
        m_cmdResp_bo = create.bo_handle;
        if (!m_cmdResp_bo) {
            ERR("%s: no handle when allocating command response buffer",
                __func__);
            return -1;
        }
        m_cmdResp_rh = create.res_handle;
        if (create.size != MAX_CMDRESPBUF_SIZE) {
            ERR("%s: command response buffer wrongly sized, create.size=%zu "
                "!= %zu", __func__,
                static_cast<size_t>(create.size),
                static_cast<size_t>(MAX_CMDRESPBUF_SIZE));
            abort();
        }
    }

    if (!m_cmdResp) {
        drm_virtgpu_map map;
        memset(&map, 0, sizeof(map));
        map.handle = m_cmdResp_bo;

        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_MAP, &map);
        if (ret) {
            ERR("%s: failed with %d mapping command response buffer (%s)",
                __func__, ret, strerror(errno));
            return -1;
        }
        m_cmdResp = static_cast<VirtioGpuCmd *>(mmap64(nullptr,
                                                       MAX_CMDRESPBUF_SIZE,
                                                       PROT_READ, MAP_SHARED,
                                                       m_fd, map.offset));
        if (m_cmdResp == MAP_FAILED) {
            ERR("%s: failed mmap'ing command response buffer (%s)",
                __func__, strerror(errno));
            return -1;
        }
    }

    return 0;
}

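// Commit whatever has been allocated since the last flush but not yet
// committed, then record the new flush watermark.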
int VirtioGpuStream::flush()
{
    int ret = commitBuffer(m_allocSize - m_allocFlushSize);
    if (ret)
        return ret;
    m_allocFlushSize = m_allocSize;
    return 0;
}

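// Reserve minSize bytes of encode space in the local command buffer, growing
// the buffer (and committing buffered commands when the next one would not
// fit) as needed, and return a pointer the caller can encode into.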
void *VirtioGpuStream::allocBuffer(size_t minSize)
{
    if (m_buf) {
        // Try to model the alloc() calls being made by the user. They should be
        // obeying the protocol and using alloc() for anything they don't write
        // with writeFully(), so we can know if this alloc() is for part of a
        // command, or not. If it is not for part of a command, we are starting
        // a new command, and should increment m_cmdPos.
        VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);
        if (m_allocSize + minSize > cmd->cmdSize) {
            m_allocFlushSize = 0U;
            m_allocSize = 0U;
            // This might also be a convenient point to flush commands
            if (m_cmdPos + cmd->cmdSize + minSize > m_bufSize) {
                if (commitAll() < 0) {
                    ERR("%s: command flush failed", __func__);
                    m_flushPos = 0U;
                    m_bufSize = 0U;
                    m_cmdPos = 0U;
                    free(m_buf);
                    m_buf = nullptr;
                    return nullptr;
                }
            } else {
                m_cmdPos += cmd->cmdSize;
                m_flushPos = m_cmdPos;
            }
        }
    }

    // Update m_allocSize here, before minSize is tampered with below
    m_allocSize += minSize;

    // Make sure anything we already have written to the buffer is retained
    minSize += m_flushPos;

    size_t allocSize = (m_bufSize < minSize ? minSize : m_bufSize);
    if (!m_buf) {
        m_buf = static_cast<unsigned char *>(malloc(allocSize));
    } else if (m_bufSize < allocSize) {
        unsigned char *p = static_cast<unsigned char *>(realloc(m_buf, allocSize));
        if (!p) {
            free(m_buf);
        }
        m_buf = p;
    }
    if (!m_buf) {
        ERR("%s: alloc (%zu) failed\n", __func__, allocSize);
        m_allocFlushSize = 0U;
        m_allocSize = 0U;
        m_flushPos = 0U;
        m_bufSize = 0U;
        m_cmdPos = 0U;
    } else {
        m_bufSize = allocSize;
    }
    if (m_flushPos == 0 && m_cmdPos == 0) {
        // During initialization, HostConnection will send an empty command
        // packet to check the connection is good, but it doesn't obey the usual
        // line protocol. This is a 4 byte write to [0], which is our 'op' field,
        // and we don't have an op=0 so it's OK. We fake up a valid length, and
        // overload this workaround by putting the res_handle for the readback
        // buffer in the command payload, patched in just before we submit.
        VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);
        cmd->op = 0U;
        cmd->cmdSize = sizeof(*cmd) + sizeof(__u32);
    }
    return m_buf + m_cmdPos;
}

// For us, writeFully() means to write a command without any header, directly
// into the buffer stream. We can use the packet frame written directly to the
// stream to verify this write is within bounds, then update the counter.

int VirtioGpuStream::writeFully(const void *buf, size_t len)
{
    if (!valid())
        return -1;

    if (!buf) {
        if (len > 0) {
            // If len is non-zero, buf must not be NULL. Otherwise the pipe would
            // be in a corrupted state, which is lethal for the emulator.
            ERR("%s: failed, buf=NULL, len %zu, lethal error, exiting",
                __func__, len);
            abort();
        }
        return 0;
    }

    VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[m_cmdPos]);

    if (m_flushPos < sizeof(*cmd)) {
        ERR("%s: writeFully len %zu would overwrite command header, "
            "cmd_pos=%zu, flush_pos=%zu, lethal error, exiting", __func__,
            len, m_cmdPos, m_flushPos);
        abort();
    }

    if (m_flushPos + len > cmd->cmdSize) {
        ERR("%s: writeFully len %zu would overflow the command bounds, "
            "cmd_pos=%zu, flush_pos=%zu, cmdsize=%" PRIu32 ", lethal error, exiting",
            __func__, len, m_cmdPos, m_flushPos, cmd->cmdSize);
        abort();
    }

    if (len > VIRGL_MAX_CMDBUF_SIZE) {
        ERR("%s: Large command (%zu bytes) exceeds virgl limits",
            __func__, len);
        /* Fall through */
    }

    memcpy(&m_buf[m_flushPos], buf, len);
    commitBuffer(len);
    m_allocSize += len;
    return 0;
}

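// Read a response from the host: submit any buffered commands first, wait for
// the virtqueue to drain on the first read of a response so the mapped buffer
// is not stale, then copy the requested bytes out of it.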
const unsigned char *VirtioGpuStream::readFully(void *buf, size_t len)
{
    if (!valid())
        return nullptr;

    if (!buf) {
        if (len > 0) {
            // If len is non-zero, buf must not be NULL. Otherwise the pipe would
            // be in a corrupted state, which is lethal for the emulator.
            ERR("%s: failed, buf=NULL, len %zu, lethal error, exiting.",
                __func__, len);
            abort();
        }
        return nullptr;
    }

    // Read is too big for current architecture
    if (len > MAX_CMDRESPBUF_SIZE - sizeof(*m_cmdResp)) {
        ERR("%s: failed, read too large, len %zu, lethal error, exiting.",
            __func__, len);
        abort();
    }

    // Commit all outstanding write commands (if any)
    if (commitAll() < 0) {
        ERR("%s: command flush failed", __func__);
        return nullptr;
    }

    if (len > 0U && m_cmdRespPos == 0U) {
        // When we are about to read for the first time, wait for the virtqueue
        // to drain to this command, otherwise the data could be stale
        drm_virtgpu_3d_wait wait = {
            .handle = m_cmdResp_bo,
        };
        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);
        if (ret) {
            ERR("%s: failed with %d waiting for response buffer (%s)",
                __func__, ret, strerror(errno));
            // Fall through, hope for the best
        }
    }

    // Most likely a protocol implementation error
    if (m_cmdResp->cmdSize - sizeof(*m_cmdResp) < m_cmdRespPos + len) {
        ERR("%s: failed, op %" PRIu32 ", len %zu, cmdSize %" PRIu32 ", pos %zu, lethal "
            "error, exiting.", __func__, m_cmdResp->op, len,
            m_cmdResp->cmdSize, m_cmdRespPos);
        abort();
    }

    memcpy(buf, &m_cmdResp->buf[m_cmdRespPos], len);

    if (m_cmdRespPos + len == m_cmdResp->cmdSize - sizeof(*m_cmdResp)) {
        m_cmdRespPos = 0U;
    } else {
        m_cmdRespPos += len;
    }

    return reinterpret_cast<const unsigned char *>(buf);
}

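// Mark size bytes at the current flush position as written and ready to
// submit; fails (without aborting) if that would run past the end of the
// local buffer.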
int VirtioGpuStream::commitBuffer(size_t size)
{
    if (m_flushPos + size > m_bufSize) {
        ERR("%s: illegal commit size %zu, flushPos %zu, bufSize %zu",
            __func__, size, m_flushPos, m_bufSize);
        return -1;
    }
    m_flushPos += size;
    return 0;
}

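// Submit every buffered command to the host, one execbuffer ioctl per command,
// attaching the response buffer object so the host can write a reply into it,
// then reset the buffer positions for the next batch.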
int VirtioGpuStream::commitAll()
{
    size_t pos = 0U, numFlushed = 0U;
    while (pos < m_flushPos) {
        VirtioGpuCmd *cmd = reinterpret_cast<VirtioGpuCmd *>(&m_buf[pos]);

        // Should never happen
        if (pos + cmd->cmdSize > m_bufSize) {
            ERR("%s: failed, pos %zu, cmdSize %" PRIu32 ", bufSize %zu, lethal "
                "error, exiting.", __func__, pos, cmd->cmdSize, m_bufSize);
            abort();
        }

        // Saw dummy command; patch it with res handle
        if (cmd->op == 0) {
            *(uint32_t *)cmd->buf = m_cmdResp_rh;
        }

        // Flush a single command
        drm_virtgpu_execbuffer execbuffer = {
            .size           = cmd->cmdSize,
            .command        = reinterpret_cast<__u64>(cmd),
            .bo_handles     = reinterpret_cast<__u64>(&m_cmdResp_bo),
            .num_bo_handles = 1U,
        };
        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
        if (ret) {
            ERR("%s: failed with %d executing command buffer (%s)", __func__,
                ret, strerror(errno));
            return -1;
        }

        pos += cmd->cmdSize;
        numFlushed++;
    }

    if (pos > m_flushPos) {
        ERR("%s: aliasing, flushPos %zu, pos %zu, probably ok", __func__,
            m_flushPos, pos);
        /* Fall through */
    }

    m_flushPos = 0U;
    m_cmdPos = 0U;
    return 0;
}
422