// Copyright (C) 2023 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <assert.h>
#include <fcntl.h>
#include <lib/magma/magma_common_defs.h>
#include <stdarg.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <virtgpu_drm.h>
#include <xf86drm.h>

#include <limits>
#include <mutex>
#include <thread>
#include <unordered_map>

#include "AddressSpaceStream.h"
#include "EncoderDebug.h"
#include "magma_enc.h"

static uint64_t get_ns_monotonic(bool raw) {
    struct timespec time;
    int ret = clock_gettime(raw ? CLOCK_MONOTONIC_RAW : CLOCK_MONOTONIC, &time);
    if (ret < 0) return 0;
    return static_cast<uint64_t>(time.tv_sec) * 1000000000ULL + time.tv_nsec;
}

class MagmaClientContext : public magma_encoder_context_t {
   public:
    MagmaClientContext(AddressSpaceStream* stream);

    AddressSpaceStream* stream() {
        return reinterpret_cast<AddressSpaceStream*>(magma_encoder_context_t::m_stream);
    }

    magma_status_t get_fd_for_buffer(magma_buffer_t buffer, int* fd_out);

    std::mutex& mutex() { return m_mutex_; }

    static magma_status_t magma_device_import(void* self, magma_handle_t device_channel,
                                              magma_device_t* device_out);
    static magma_status_t magma_device_query(void* self, magma_device_t device, uint64_t id,
                                             magma_handle_t* handle_out, uint64_t* value_out);
    static magma_status_t magma_buffer_export(void* self, magma_buffer_t buffer,
                                              magma_handle_t* handle_out);
    static magma_status_t magma_poll(void* self, magma_poll_item_t* items, uint32_t count,
                                     uint64_t timeout_ns);
    static magma_status_t magma_connection_create_buffer(void* self, magma_connection_t connection,
                                                         uint64_t size, uint64_t* size_out,
                                                         magma_buffer_t* buffer_out,
                                                         magma_buffer_id_t* id_out);
    static void magma_connection_release_buffer(void* self, magma_connection_t connection,
                                                magma_buffer_t buffer);

    static void set_thread_local_context_lock(std::unique_lock<std::mutex>* lock) { t_lock = lock; }

    static std::unique_lock<std::mutex>* get_thread_local_context_lock() { return t_lock; }

    magma_device_import_client_proc_t magma_device_import_enc_;
    magma_device_query_client_proc_t magma_device_query_enc_;
    magma_poll_client_proc_t magma_poll_enc_;
    magma_connection_create_buffer_client_proc_t magma_connection_create_buffer_enc_;
    magma_connection_release_buffer_client_proc_t magma_connection_release_buffer_enc_;

    int render_node_fd_;

    // Stores buffer info upon creation.
    struct BufferInfo {
        magma_connection_t connection;  // Owning connection.
        uint64_t size;                  // Actual size.
        magma_buffer_id_t id;           // Id.
    };
    std::unordered_map<magma_buffer_t, BufferInfo> buffer_info_;

    std::mutex m_mutex_;
    static thread_local std::unique_lock<std::mutex>* t_lock;
};

// Publishes the per-context mutex lock to decoding methods that can take time
// (e.g. magma_poll), so that a long-running call can release it and avoid locking
// out other threads. See the usage sketch after this class.
class ContextLock {
   public:
    ContextLock(MagmaClientContext* context) : m_context_(context), m_lock_(context->mutex()) {
        m_context_->set_thread_local_context_lock(&m_lock_);
    }

    ~ContextLock() { m_context_->set_thread_local_context_lock(nullptr); }

   private:
    MagmaClientContext* m_context_;
    std::unique_lock<std::mutex> m_lock_;
};
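
// Usage sketch (illustrative only; the real entry points are generated into
// magma_entry.cpp via the GET_CONTEXT macro at the bottom of this file):
//
//   MagmaClientContext* ctx = GetMagmaContext();
//   ContextLock lock(ctx);                  // locks ctx->mutex(), publishes it via t_lock
//   ctx->magma_poll(ctx, items, count, ns); // may unlock/relock through t_lock while waiting
//
// A decoding method that needs to wait can cooperatively yield the lock with
// get_thread_local_context_lock()->unlock() and reacquire it with ->lock(),
// exactly as magma_poll does below.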

// static
thread_local std::unique_lock<std::mutex>* MagmaClientContext::t_lock;

MagmaClientContext::MagmaClientContext(AddressSpaceStream* stream)
    : magma_encoder_context_t(stream, new ChecksumCalculator) {
    // Save the encoder implementations before overriding the dispatch entries below.
    magma_device_import_enc_ = magma_client_context_t::magma_device_import;
    magma_device_query_enc_ = magma_client_context_t::magma_device_query;
    magma_poll_enc_ = magma_client_context_t::magma_poll;
    magma_connection_create_buffer_enc_ = magma_client_context_t::magma_connection_create_buffer;
    magma_connection_release_buffer_enc_ = magma_client_context_t::magma_connection_release_buffer;

    magma_client_context_t::magma_device_import = &MagmaClientContext::magma_device_import;
    magma_client_context_t::magma_device_query = &MagmaClientContext::magma_device_query;
    magma_client_context_t::magma_buffer_export = &MagmaClientContext::magma_buffer_export;
    magma_client_context_t::magma_poll = &MagmaClientContext::magma_poll;
    magma_client_context_t::magma_connection_create_buffer =
        &MagmaClientContext::magma_connection_create_buffer;
    magma_client_context_t::magma_connection_release_buffer =
        &MagmaClientContext::magma_connection_release_buffer;
}

// static
magma_status_t MagmaClientContext::magma_device_import(void* self, magma_handle_t device_channel,
                                                       magma_device_t* device_out) {
    auto context = reinterpret_cast<MagmaClientContext*>(self);

    magma_handle_t placeholder = 0xacbd1234;  // not used

    magma_status_t status = context->magma_device_import_enc_(self, placeholder, device_out);

    // The local fd isn't needed, just close it.
    int fd = device_channel;
    close(fd);

    return status;
}

magma_status_t MagmaClientContext::get_fd_for_buffer(magma_buffer_t buffer, int* fd_out) {
    *fd_out = -1;

    auto it = buffer_info_.find(buffer);
    if (it == buffer_info_.end()) {
        ALOGE("%s: buffer (%lu) not found in map", __func__, buffer);
        return MAGMA_STATUS_INVALID_ARGS;
    }
    auto& info = it->second;

    // TODO(fxbug.dev/122604): Evaluate deferred guest resource creation.
    auto blob = VirtGpuDevice::getInstance(VirtGpuCapset::kCapsetGfxStream)
                    .createBlob({.size = info.size,
                                 .flags = kBlobFlagMappable | kBlobFlagShareable,
                                 .blobMem = kBlobMemHost3d,
                                 .blobId = info.id});
    if (!blob) {
        return MAGMA_STATUS_INTERNAL_ERROR;
    }

    VirtGpuExternalHandle handle{};
    int result = blob->exportBlob(handle);
    if (result != 0 || handle.osHandle < 0) {
        return MAGMA_STATUS_INTERNAL_ERROR;
    }

    *fd_out = handle.osHandle;

    return MAGMA_STATUS_OK;
}

magma_status_t MagmaClientContext::magma_device_query(void* self, magma_device_t device,
                                                      uint64_t id, magma_handle_t* handle_out,
                                                      uint64_t* value_out) {
    auto context = reinterpret_cast<MagmaClientContext*>(self);

    magma_buffer_t buffer = 0;
    uint64_t value = 0;
    {
        magma_handle_t handle;
        magma_status_t status = context->magma_device_query_enc_(self, device, id, &handle, &value);
        if (status != MAGMA_STATUS_OK) {
            ALOGE("magma_device_query_enc failed: %d\n", status);
            return status;
        }
        // magma_buffer_t and magma_handle_t are both gem_handles on the server.
        buffer = handle;
    }

    if (!buffer) {
        if (!value_out) return MAGMA_STATUS_INVALID_ARGS;

        *value_out = value;

        if (handle_out) {
            *handle_out = -1;
        }

        return MAGMA_STATUS_OK;
    }

    if (!handle_out) return MAGMA_STATUS_INVALID_ARGS;

    int fd;
    magma_status_t status = context->get_fd_for_buffer(buffer, &fd);
    if (status != MAGMA_STATUS_OK) return status;

    *handle_out = fd;

    return MAGMA_STATUS_OK;
}

magma_status_t MagmaClientContext::magma_buffer_export(void* self, magma_buffer_t buffer,
                                                       magma_handle_t* handle_out) {
    auto context = reinterpret_cast<MagmaClientContext*>(self);

    int fd;
    magma_status_t status = context->get_fd_for_buffer(buffer, &fd);
    if (status != MAGMA_STATUS_OK) return status;

    *handle_out = fd;

    return MAGMA_STATUS_OK;
}

// We can't pass a non-zero timeout to the server, as that would block the server from handling
// requests from other threads. So we busy-wait here, which isn't ideal; however, if the server
// did block, gfxstream would busy-wait for the response anyway.
magma_status_t MagmaClientContext::magma_poll(void* self, magma_poll_item_t* items, uint32_t count,
                                              uint64_t timeout_ns) {
    auto context = reinterpret_cast<MagmaClientContext*>(self);

    int64_t time_start = static_cast<int64_t>(get_ns_monotonic(false));

    int64_t abs_timeout_ns = time_start + timeout_ns;

    if (abs_timeout_ns < time_start) {
        abs_timeout_ns = std::numeric_limits<int64_t>::max();
    }

    bool warned_for_long_poll = false;

    while (true) {
        magma_status_t status = context->magma_poll_enc_(self, items, count, 0);

        if (status != MAGMA_STATUS_TIMED_OUT) return status;

        // Not ready; release the lock so other threads can make progress in the meantime.
        get_thread_local_context_lock()->unlock();

        std::this_thread::yield();

        int64_t time_now = static_cast<int64_t>(get_ns_monotonic(false));

        // TODO(fxb/122604): Add back-off to the busy loop, ideally based on recent sleep
        // patterns (e.g. start polling shortly before next expected burst).
        if (!warned_for_long_poll && time_now - time_start > 5000000000) {
            ALOGE("magma_poll: long poll detected (%lu us)", (time_now - time_start) / 1000);
            warned_for_long_poll = true;
        }

        if (time_now >= abs_timeout_ns) break;

        get_thread_local_context_lock()->lock();
    }

    return MAGMA_STATUS_TIMED_OUT;
}

// Magma 1.0 no longer tracks buffer size and id on behalf of the client, so we mirror it here.
magma_status_t MagmaClientContext::magma_connection_create_buffer(void* self,
                                                                  magma_connection_t connection,
                                                                  uint64_t size, uint64_t* size_out,
                                                                  magma_buffer_t* buffer_out,
                                                                  magma_buffer_id_t* id_out) {
    auto context = reinterpret_cast<MagmaClientContext*>(self);

    // TODO(b/277219980): support guest-allocated buffers
    magma_status_t status = context->magma_connection_create_buffer_enc_(
        self, connection, size, size_out, buffer_out, id_out);
    if (status != MAGMA_STATUS_OK) return status;

    auto [_, inserted] = context->buffer_info_.emplace(
        *buffer_out, BufferInfo{.connection = connection, .size = *size_out, .id = *id_out});
    if (!inserted) {
        ALOGE("magma_connection_create_buffer: duplicate entry in buffer info map");
        return MAGMA_STATUS_INTERNAL_ERROR;
    }

    return MAGMA_STATUS_OK;
}

void MagmaClientContext::magma_connection_release_buffer(void* self, magma_connection_t connection,
                                                         magma_buffer_t buffer) {
    auto context = reinterpret_cast<MagmaClientContext*>(self);

    context->magma_connection_release_buffer_enc_(self, connection, buffer);

    // An invalid buffer or connection is treated as a no-op by magma, so only log at verbose level.
    auto it = context->buffer_info_.find(buffer);
    if (it == context->buffer_info_.end()) {
        ALOGV("magma_connection_release_buffer: buffer (%lu) not found in map", buffer);
        return;
    }
    if (it->second.connection != connection) {
        ALOGV(
            "magma_connection_release_buffer: buffer (%lu) attempted release using wrong "
            "connection (expected %lu, received %lu)",
            buffer, it->second.connection, connection);
        return;
    }
    context->buffer_info_.erase(it);
}

template <typename T, typename U>
static T SafeCast(const U& value) {
    if (value > std::numeric_limits<T>::max() || value < std::numeric_limits<T>::min()) {
        abort();
    }
    return static_cast<T>(value);
}

// We have a singleton client context for all threads.  We want all client
// threads served by a single server RenderThread.
MagmaClientContext* GetMagmaContext() {
    static MagmaClientContext* s_context;
    static std::once_flag once_flag;

    std::call_once(once_flag, []() {
        auto stream = createVirtioGpuAddressSpaceStream(nullptr);
        assert(stream);

        // RenderThread expects flags: send zero 'clientFlags' to the host.
        {
            auto pClientFlags =
                reinterpret_cast<unsigned int*>(stream->allocBuffer(sizeof(unsigned int)));
            *pClientFlags = 0;
            stream->commitBuffer(sizeof(unsigned int));
        }

        s_context = new MagmaClientContext(stream);
        auto render_node_fd =
            VirtGpuDevice::getInstance(VirtGpuCapset::kCapsetGfxStream).getDeviceHandle();
        s_context->render_node_fd_ = SafeCast<int>(render_node_fd);

        ALOGE("Created new context\n");
        fflush(stdout);
    });

    return s_context;
}

// Used in magma_entry.cpp
// Always lock around the encoding methods because we have a singleton context.
#define GET_CONTEXT                              \
    MagmaClientContext* ctx = GetMagmaContext(); \
    ContextLock lock(ctx)

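// Illustrative sketch of a generated entry point (hypothetical; the actual code is
// produced by the emulator's code generator into magma_entry.cpp and may differ in detail):
//
//   magma_status_t magma_device_query(magma_device_t device, uint64_t id,
//                                     magma_handle_t* result_buffer_out, uint64_t* result_out) {
//       GET_CONTEXT;  // MagmaClientContext* ctx = GetMagmaContext(); ContextLock lock(ctx);
//       return ctx->magma_device_query(ctx, device, id, result_buffer_out, result_out);
//   }
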
#include "magma_entry.cpp"