// Copyright (C) 2019 The Android Open Source Project
// Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <linux/types.h>
#include <linux/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdlib>
#include <errno.h>
#include <memory>
#include <cstring>

#ifdef VIRTIO_GPU
#include <xf86drm.h>
#endif

#include <log/log.h>

#include "goldfish_address_space.h"
#include "virtgpu_drm.h"

// See virgl_hw.h and p_defines.h
#define VIRGL_FORMAT_R8_UNORM 64
#define VIRGL_BIND_CUSTOM (1 << 17)
#define PIPE_BUFFER 0

namespace {

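// The structs and ioctl codes below duplicate the goldfish_address_space
// kernel driver's UAPI and are presumed to stay in sync with it.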
struct goldfish_address_space_allocate_block {
    __u64 size;
    __u64 offset;
    __u64 phys_addr;
};

struct goldfish_address_space_claim_shared {
    __u64 offset;
    __u64 size;
};

#define GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC		'G'
#define GOLDFISH_ADDRESS_SPACE_IOCTL_OP(OP, T)		_IOWR(GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC, OP, T)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(10, struct goldfish_address_space_allocate_block)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(11, __u64)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_PING		GOLDFISH_ADDRESS_SPACE_IOCTL_OP(12, struct address_space_ping)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(13, struct goldfish_address_space_claim_shared)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(14, __u64)

const char GOLDFISH_ADDRESS_SPACE_DEVICE_NAME[] = "/dev/goldfish_address_space";

const int HOST_MEMORY_ALLOCATOR_COMMAND_ALLOCATE_ID = 1;
const int HOST_MEMORY_ALLOCATOR_COMMAND_UNALLOCATE_ID = 2;

int create_address_space_fd()
{
    return ::open(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME, O_RDWR);
}

long ioctl_allocate(int fd, struct goldfish_address_space_allocate_block *request)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK, request);
}

long ioctl_deallocate(int fd, uint64_t offset)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK, &offset);
}

long ioctl_ping(int fd, struct address_space_ping *request)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_PING, request);
}

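// Selects an address space subdevice by PINGing the driver with the subdevice
// type in |metadata|; on success, returns whatever the device wrote back into
// |metadata| (0, or historically the subdevice type itself; see the TODO at
// the call site).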
long set_address_space_subdevice_type(int fd, uint64_t type)
{
    struct address_space_ping request;
    ::memset(&request, 0, sizeof(request));
    request.version = sizeof(request);
    request.metadata = type;

    long ret = ioctl_ping(fd, &request);
    if (ret) {
        return ret;
    }

    return request.metadata;
}

long ioctl_claim_shared(int fd, struct goldfish_address_space_claim_shared *request)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED, request);
}

long ioctl_unclaim_shared(int fd, uint64_t offset)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED, &offset);
}

}  // namespace

GoldfishAddressSpaceBlockProvider::GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType subdevice)
  : m_handle(create_address_space_fd())
{
    if ((subdevice != GoldfishAddressSpaceSubdeviceType::NoSubdevice) && is_opened()) {
        const long ret = set_address_space_subdevice_type(m_handle, subdevice);
        if (ret != 0 && ret != subdevice) {  // TODO: retire the 'ret != subdevice' check
            ALOGE("%s: set_address_space_subdevice_type failed for device_type=%lu, ret=%ld",
                  __func__, static_cast<unsigned long>(subdevice), ret);
            close();
        }
    }
}

GoldfishAddressSpaceBlockProvider::~GoldfishAddressSpaceBlockProvider()
{
    if (is_opened()) {
        ::close(m_handle);
    }
}

bool GoldfishAddressSpaceBlockProvider::is_opened() const
{
    return m_handle >= 0;
}

void GoldfishAddressSpaceBlockProvider::close()
{
    if (is_opened()) {
        ::close(m_handle);
        m_handle = -1;
    }
}

address_space_handle_t GoldfishAddressSpaceBlockProvider::release()
{
    address_space_handle_t handle = m_handle;
    m_handle = -1;
    return handle;
}

void GoldfishAddressSpaceBlockProvider::closeHandle(address_space_handle_t handle)
{
    ::close(handle);
}

GoldfishAddressSpaceBlock::GoldfishAddressSpaceBlock()
    : m_handle(-1)
    , m_mmaped_ptr(NULL)
    , m_phys_addr(0)
    , m_host_addr(0)
    , m_offset(0)
    , m_size(0) {}

GoldfishAddressSpaceBlock::~GoldfishAddressSpaceBlock()
{
    destroy();
}

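// Note: shallow copy; afterwards both objects alias the same fd and mapping
// (see release() for giving up ownership without unmapping).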
GoldfishAddressSpaceBlock &GoldfishAddressSpaceBlock::operator=(const GoldfishAddressSpaceBlock &rhs)
{
    m_mmaped_ptr = rhs.m_mmaped_ptr;
    m_phys_addr = rhs.m_phys_addr;
    m_host_addr = rhs.m_host_addr;
    m_offset = rhs.m_offset;
    m_size = rhs.m_size;
    m_handle = rhs.m_handle;
    m_is_shared_mapping = rhs.m_is_shared_mapping;  // keep in sync so destroy() picks the right ioctl

    return *this;
}

bool GoldfishAddressSpaceBlock::allocate(GoldfishAddressSpaceBlockProvider *provider, size_t size)
{
    ALOGV("%s: Ask for block of size 0x%llx\n", __func__,
         (unsigned long long)size);

    destroy();

    if (!provider->is_opened()) {
        return false;
    }

    struct goldfish_address_space_allocate_block request;
    ::memset(&request, 0, sizeof(request));
    request.size = size;

    long res = ioctl_allocate(provider->m_handle, &request);
    if (res) {
        return false;
    } else {
        m_phys_addr = request.phys_addr;
        m_offset = request.offset;
        m_size = request.size;
        m_handle = provider->m_handle;
        m_is_shared_mapping = false;

        ALOGV("%s: ioctl allocate returned offset 0x%llx size 0x%llx\n", __func__,
                (unsigned long long)m_offset,
                (unsigned long long)m_size);

        return true;
    }
}

bool GoldfishAddressSpaceBlock::claimShared(GoldfishAddressSpaceBlockProvider *provider, uint64_t offset, uint64_t size)
{
    ALOGV("%s: Ask to claim region [0x%llx 0x%llx]\n", __func__,
         (unsigned long long)offset,
         (unsigned long long)offset + size);

    destroy();

    if (!provider->is_opened()) {
        return false;
    }

    struct goldfish_address_space_claim_shared request;
    request.offset = offset;
    request.size = size;
    long res = ioctl_claim_shared(provider->m_handle, &request);

    if (res) {
        return false;
    }

    m_offset = offset;
    m_size = size;
    m_handle = provider->m_handle;
    m_is_shared_mapping = true;

    return true;
}

uint64_t GoldfishAddressSpaceBlock::physAddr() const
{
    return m_phys_addr;
}

uint64_t GoldfishAddressSpaceBlock::hostAddr() const
{
    return m_host_addr;
}

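// Maps the block into this process and returns the guest pointer, adjusted by
// the sub-page offset of |host_addr| (host allocations need not be page
// aligned; see guestPtr()).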
void *GoldfishAddressSpaceBlock::mmap(uint64_t host_addr)
{
    if (m_size == 0) {
        ALOGE("%s: called with zero size\n", __func__);
        return NULL;
    }
    if (m_mmaped_ptr) {
        ALOGE("'mmap' called for an already mmaped address block");
        ::abort();
    }

    void *result;
    const int res = memoryMap(NULL, m_size, m_handle, m_offset, &result);
    if (res) {
        ALOGE("%s: host memory map failed with size 0x%llx "
              "off 0x%llx errno %d\n",
              __func__,
              (unsigned long long)m_size,
              (unsigned long long)m_offset, res);
        return NULL;
    } else {
        m_mmaped_ptr = result;
        m_host_addr = host_addr;
        return guestPtr();
    }
}

void *GoldfishAddressSpaceBlock::guestPtr() const
{
    return reinterpret_cast<char *>(m_mmaped_ptr) + (m_host_addr & (PAGE_SIZE - 1));
}

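// Unmaps the block (if mapped) and returns it to the driver: UNCLAIM_SHARED
// for claimed shared regions, DEALLOCATE_BLOCK for allocated ones.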
void GoldfishAddressSpaceBlock::destroy()
{
    if (m_mmaped_ptr && m_size) {
        memoryUnmap(m_mmaped_ptr, m_size);
        m_mmaped_ptr = NULL;
    }

    if (m_size) {
        long res = -EINVAL;

        if (m_is_shared_mapping) {
            res = ioctl_unclaim_shared(m_handle, m_offset);
            if (res) {
                ALOGE("ioctl_unclaim_shared failed, res=%ld", res);
                ::abort();
            }
        } else {
            res = ioctl_deallocate(m_handle, m_offset);
            if (res) {
                ALOGE("ioctl_deallocate failed, res=%ld", res);
                ::abort();
            }
        }

        m_is_shared_mapping = false;

        m_phys_addr = 0;
        m_host_addr = 0;
        m_offset = 0;
        m_size = 0;
    }
}

void GoldfishAddressSpaceBlock::release()
{
    m_handle = -1;
    m_mmaped_ptr = NULL;
    m_phys_addr = 0;
    m_host_addr = 0;
    m_offset = 0;
    m_size = 0;
}

int GoldfishAddressSpaceBlock::memoryMap(void *addr,
                                         size_t len,
                                         address_space_handle_t fd,
                                         uint64_t off,
                                         void** dst) {
    void* ptr = ::mmap64(addr, len, PROT_WRITE, MAP_SHARED, fd, off);
    if (MAP_FAILED == ptr) {
        return errno;
    } else {
        *dst = ptr;
        return 0;
    }
}

void GoldfishAddressSpaceBlock::memoryUnmap(void *ptr, size_t size)
{
    ::munmap(ptr, size);
}

GoldfishAddressSpaceHostMemoryAllocator::GoldfishAddressSpaceHostMemoryAllocator(bool useSharedSlots)
  : m_provider(useSharedSlots
        ? GoldfishAddressSpaceSubdeviceType::SharedSlotsHostMemoryAllocator
        : GoldfishAddressSpaceSubdeviceType::HostMemoryAllocator),
    m_useSharedSlots(useSharedSlots)
{}

bool GoldfishAddressSpaceHostMemoryAllocator::is_opened() const { return m_provider.is_opened(); }

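// Allocates |size| bytes of host memory and maps them into the guest. With
// shared slots the host chooses the region and the guest claims it;
// otherwise the guest allocates a block first and then asks the host to back
// it. A usage sketch follows hostFree() below.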
long GoldfishAddressSpaceHostMemoryAllocator::hostMalloc(GoldfishAddressSpaceBlock *block, size_t size)
{
    if (size == 0) {
        return -EINVAL;
    }
    if (block->size() > 0) {
        return -EINVAL;
    }
    if (!m_provider.is_opened()) {
        return -ENODEV;
    }

    struct address_space_ping request;
    if (m_useSharedSlots) {
        // shared memory slots are supported
        ::memset(&request, 0, sizeof(request));
        request.version = sizeof(request);
        request.size = size;
        request.metadata = HOST_MEMORY_ALLOCATOR_COMMAND_ALLOCATE_ID;

        long ret = ioctl_ping(m_provider.m_handle, &request);
        if (ret) {
            return ret;
        }
        ret = static_cast<long>(request.metadata);
        if (ret) {
            return ret;
        }

        block->claimShared(&m_provider, request.offset, request.size);
    } else {
        // shared memory slots are not supported
        if (!block->allocate(&m_provider, size)) {
            return -ENOMEM;
        }

        ::memset(&request, 0, sizeof(request));
        request.version = sizeof(request);
        request.offset = block->offset();
        request.size = block->size();
        request.metadata = HOST_MEMORY_ALLOCATOR_COMMAND_ALLOCATE_ID;

        long ret = ioctl_ping(m_provider.m_handle, &request);
        if (ret) {
            return ret;
        }
        ret = static_cast<long>(request.metadata);
        if (ret) {
            return ret;
        }
    }

    block->mmap(0);
    return 0;
}

void GoldfishAddressSpaceHostMemoryAllocator::hostFree(GoldfishAddressSpaceBlock *block)
{
    if (block->size() == 0) {
        return;
    }

    if (!m_provider.is_opened()) {
        ALOGE("%s: device is not available", __func__);
        ::abort();
    }

    if (block->guestPtr()) {
        struct address_space_ping request;
        ::memset(&request, 0, sizeof(request));
        request.version = sizeof(request);
        request.offset = block->offset();
        request.metadata = HOST_MEMORY_ALLOCATOR_COMMAND_UNALLOCATE_ID;

        const long ret = ioctl_ping(m_provider.m_handle, &request);
        if (ret) {
            ALOGE("%s: ioctl_ping failed, ret=%ld", __func__, ret);
            ::abort();
        }
    }

    block->replace(NULL);
}

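// Typical usage of the allocator, as a sketch (error handling elided;
// declarations are in goldfish_address_space.h):
//
//   GoldfishAddressSpaceHostMemoryAllocator allocator(false /* useSharedSlots */);
//   GoldfishAddressSpaceBlock block;
//   if (allocator.hostMalloc(&block, 4096) == 0) {
//       void* ptr = block.guestPtr();  // guest-visible view of the host memory
//       // ... use ptr ...
//       allocator.hostFree(&block);
//   }
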
address_space_handle_t goldfish_address_space_open() {
    return ::open(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME, O_RDWR);
}

void goldfish_address_space_close(address_space_handle_t handle) {
    ::close(handle);
}

bool goldfish_address_space_allocate(
    address_space_handle_t handle,
    size_t size, uint64_t* phys_addr, uint64_t* offset) {

    struct goldfish_address_space_allocate_block request;
    ::memset(&request, 0, sizeof(request));
    request.size = size;

    long res = ioctl_allocate(handle, &request);

    if (res) return false;

    *phys_addr = request.phys_addr;
    *offset = request.offset;
    return true;
}

bool goldfish_address_space_free(
    address_space_handle_t handle, uint64_t offset) {

    long res = ioctl_deallocate(handle, offset);

    if (res) {
        ALOGE("ioctl_deallocate failed, res=%ld", res);
        ::abort();
    }

    return true;
}

bool goldfish_address_space_claim_shared(
    address_space_handle_t handle, uint64_t offset, uint64_t size) {

    struct goldfish_address_space_claim_shared request;
    request.offset = offset;
    request.size = size;
    long res = ioctl_claim_shared(handle, &request);

    if (res) return false;

    return true;
}

bool goldfish_address_space_unclaim_shared(
        address_space_handle_t handle, uint64_t offset) {
    long res = ioctl_unclaim_shared(handle, offset);
    if (res) {
        ALOGE("ioctl_unclaim_shared failed, res=%ld", res);
        ::abort();
    }

    return true;
}

// pgoff is the offset into the page to return in the result
void* goldfish_address_space_map(
    address_space_handle_t handle,
    uint64_t offset, uint64_t size,
    uint64_t pgoff) {

    void* res = ::mmap64(0, size, PROT_WRITE, MAP_SHARED, handle, offset);

    if (res == MAP_FAILED) {
        ALOGE("%s: failed to map. errno: %d\n", __func__, errno);
        return 0;
    }

    return (void*)(((char*)res) + (uintptr_t)(pgoff & (PAGE_SIZE - 1)));
}

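// Masks the pointer back down to its page base before munmap, undoing the
// sub-page offset that goldfish_address_space_map may have added.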
void goldfish_address_space_unmap(void* ptr, uint64_t size) {
    void* pagePtr = (void*)(((uintptr_t)ptr) & ~(PAGE_SIZE - 1));
    ::munmap(pagePtr, size);
}

bool goldfish_address_space_set_subdevice_type(
    address_space_handle_t handle, GoldfishAddressSpaceSubdeviceType type,
    address_space_handle_t* handle_out) {
    struct address_space_ping request;
    ::memset(&request, 0, sizeof(request));
    request.version = sizeof(request);
    request.metadata = (uint64_t)type;
    *handle_out = handle;
    return goldfish_address_space_ping(handle, &request);
}

bool goldfish_address_space_ping(
    address_space_handle_t handle,
    struct address_space_ping* ping) {
    long res = ioctl_ping(handle, ping);

    if (res) {
        ALOGE("%s: ping failed: errno: %d\n", __func__, errno);
        return false;
    }

    return true;
}

#define CAPSET_GFXSTREAM 3

address_space_handle_t virtgpu_address_space_open() {
    return -EINVAL;
}

void virtgpu_address_space_close(address_space_handle_t fd) {
    close(fd);
}

// kVirtioGpuAddressSpaceContextCreateWithSubdevice | subdeviceType
const uint32_t kVirtioGpuAddressSpaceContextCreateWithSubdevice = 0x1001;

// kVirtioGpuAddressSpacePing | offset_lo | offset_hi | size_lo | size_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
// no output
const uint32_t kVirtioGpuAddressSpacePing = 0x1002;

// kVirtioGpuAddressSpacePingWithResponse | resp_resid | offset_lo | offset_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
// out: same as input then | out: error
const uint32_t kVirtioGpuAddressSpacePingWithResponse = 0x1003;

// Ping with no response
bool virtgpu_address_space_ping(address_space_handle_t fd, struct address_space_ping* info) {

    uint32_t words[] = {
        kVirtioGpuAddressSpacePing,
        (uint32_t)(info->offset), (uint32_t)(info->offset >> 32),
        (uint32_t)(info->size), (uint32_t)(info->size >> 32),
        (uint32_t)(info->metadata), (uint32_t)(info->metadata >> 32),
        (uint32_t)(info->version), (uint32_t)(info->wait_fd),
        (uint32_t)(info->wait_flags), (uint32_t)(info->direction),
    };

    drm_virtgpu_execbuffer execbuffer = {
        .flags = 0,
        .size = sizeof(words),
        .command = (uint64_t)(uintptr_t)(words),
        .bo_handles = 0,
        .num_bo_handles = 0,
        .fence_fd = -1,
    };

    int queue_work_err = drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);

    if (queue_work_err) {
        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
                queue_work_err, strerror(errno));
        return false;
    }

    return true;
}

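// Creates an address space context for |subdevice_type|: allocates and maps a
// one-page response buffer, then submits the context-create command.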
bool virtgpu_address_space_create_context_with_subdevice(
    address_space_handle_t fd,
    uint32_t subdevice_type,
    struct address_space_virtgpu_info* info_out) {

    // response page
    drm_virtgpu_resource_create create = {
        .target     = PIPE_BUFFER,
        .format     = VIRGL_FORMAT_R8_UNORM,
        .bind       = VIRGL_BIND_CUSTOM,
        .width      = 4096,
        .height     = 1U,
        .depth      = 1U,
        .array_size = 0U,
        .size       = 4096,
        .stride     = 4096,
    };

    int ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &create);
    if (ret) {
        ALOGE("%s: failed with %d allocating command buffer (%s)\n",
                __func__, ret, strerror(errno));
        return false;
    }

    drm_virtgpu_map map;
    memset(&map, 0, sizeof(map));
    map.handle = create.bo_handle;

    ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
    if (ret) {
        ALOGE("%s: failed with %d mapping command response buffer (%s)\n",
            __func__, ret, strerror(errno));
        return false;
    }

    void* ptr = static_cast<unsigned char*>(
            mmap64(nullptr, 4096, PROT_WRITE, MAP_SHARED, fd, map.offset));

    if (ptr == MAP_FAILED) {
        ALOGE("%s: failed with %d mmap'ing command response buffer (%s)\n",
                __func__, errno, strerror(errno));
        return false;
    }

    info_out->fd = fd;
    info_out->resp_bo = create.bo_handle;
    info_out->resp_resid = create.res_handle;
    info_out->resp_mapped_ptr = ptr;

    ALOGD("%s: resp bo: %u resid %u mapped %p\n", __func__,
            create.bo_handle, create.res_handle, ptr);

    // Context creation command
    uint32_t words[] = {
        kVirtioGpuAddressSpaceContextCreateWithSubdevice,
        subdevice_type,
    };

    drm_virtgpu_execbuffer execbuffer = {
        .flags = 0,
        .size = sizeof(words),
        .command = (uint64_t)(uintptr_t)(words),
        .bo_handles = 0,
        .num_bo_handles = 0,
        .fence_fd = -1,
    };

    int queue_work_err = drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);

    if (queue_work_err) {
        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
                queue_work_err, strerror(errno));
        return false;
    }

    return true;
}

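// Exposes the host memory identified by |hostmem_id| as a mappable host3d
// blob resource and maps it directly into the guest.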
bool virtgpu_address_space_allocate_hostmem(
    address_space_handle_t fd,
    size_t size,
    uint64_t hostmem_id,
    struct address_space_virtgpu_hostmem_info* hostmem_info_out) {

    struct drm_virtgpu_resource_create_blob drm_rc_blob = {};
    drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
    drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
    drm_rc_blob.blob_id = hostmem_id;
    drm_rc_blob.size = size;

    int res = drmIoctl(
            fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);

    if (res) {
        ALOGE("%s: Failed to resource create blob: strerror: %s errno: %d\n", __func__,
                strerror(errno), errno);
        abort();
    }

    drm_virtgpu_map map;
    memset(&map, 0, sizeof(map));
    map.handle = drm_rc_blob.bo_handle;

    res = drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
    if (res) {
        ALOGE("%s: Failed to virtgpu map: strerror: %s errno: %d\n", __func__,
                strerror(errno), errno);
        abort();
    }

    void* directMappedAddr = mmap64(0, size, PROT_WRITE, MAP_SHARED, fd, map.offset);

    if (directMappedAddr == MAP_FAILED) {
        ALOGE("%s: mmap of virtio gpu resource failed\n", __func__);
        abort();
    }

    hostmem_info_out->id = hostmem_id;
    hostmem_info_out->bo = drm_rc_blob.bo_handle;
    hostmem_info_out->ptr = directMappedAddr;
    return true;
}

uint64_t buildu64(uint32_t lo, uint32_t hi) {
    uint64_t res = (uint64_t)lo;
    uint64_t hi64 = (uint64_t)hi;
    return res | (hi64 << 32);
}

/* Used to retry DRM_IOCTL_VIRTGPU_WAIT, which can also return EBUSY. */
#define TEMP_FAILURE_RETRY_BUSY(tag, exp) ({                                            \
    __typeof__(exp) _rc;                                                                \
    uint32_t busy_times = 0; /* outside the loop so the count accumulates */            \
    do {                                                                                \
        _rc = (exp);                                                                    \
        if (_rc != 0 && errno == EBUSY) {                                               \
            ++busy_times;                                                               \
            usleep(10000);                                                              \
            ALOGE("%s:%s busy! waited %u times on EBUSY\n", __func__, tag, busy_times); \
        }                                                                               \
    } while (_rc != 0 && (errno == EINTR || errno == EBUSY));                           \
    _rc; })

// Ping with response
bool virtgpu_address_space_ping_with_response(
    struct address_space_virtgpu_info* info,
    struct address_space_ping* ping) {

    uint32_t words[] = {
        kVirtioGpuAddressSpacePingWithResponse,
        info->resp_resid,
        (uint32_t)(ping->offset), (uint32_t)(ping->offset >> 32),
        (uint32_t)(ping->size), (uint32_t)(ping->size >> 32),
        (uint32_t)(ping->metadata), (uint32_t)(ping->metadata >> 32),
        (uint32_t)(ping->version), (uint32_t)(ping->wait_fd),
        (uint32_t)(ping->wait_flags), (uint32_t)(ping->direction),
    };

    drm_virtgpu_execbuffer execbuffer = {
        .flags = 0,
        .size = sizeof(words),
        .command = (uint64_t)(uintptr_t)(words),
        .bo_handles = (uint64_t)(uintptr_t)(&info->resp_bo),
        .num_bo_handles = 1,
        .fence_fd = -1,
    };

    int queue_work_err = drmIoctl(info->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);

    if (queue_work_err) {
        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
                queue_work_err, strerror(errno));
        return false;
    }

    struct drm_virtgpu_3d_wait waitcmd;
    memset(&waitcmd, 0, sizeof(waitcmd));
    waitcmd.handle = info->resp_bo;

    int ret = TEMP_FAILURE_RETRY_BUSY("DRM_IOCTL_VIRTGPU_WAIT", drmIoctl(info->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd));
    if (ret) {
        ALOGE("%s: DRM_IOCTL_VIRTGPU_WAIT failed with %d (%s)\n", __func__, errno, strerror(errno));
        return false;
    }

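    // The host wrote the updated ping fields into the response page; copy
    // them back out to the caller.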
    uint32_t* respWords = (uint32_t*)info->resp_mapped_ptr;

    ping->offset = buildu64(respWords[0], respWords[1]);
    ping->size = buildu64(respWords[2], respWords[3]);
    ping->metadata = buildu64(respWords[4], respWords[5]);
    ping->version = respWords[6];
    ping->wait_fd = respWords[7];
    ping->wait_flags = respWords[8];
    ping->direction = respWords[9];

    return true;
}