// Copyright 2021 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef VIRTIO_GPU_TIMELINES_H
#define VIRTIO_GPU_TIMELINES_H

#include <atomic>
#include <functional>
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <variant>

#include "aemu/base/synchronization/Lock.h"
#include "gfxstream/virtio-gpu-gfxstream-renderer.h"
#include "render-utils/virtio_gpu_ops.h"

typedef uint32_t VirtioGpuCtxId;
typedef uint8_t VirtioGpuRingIdx;

// A VirtioGpuRing identifies which timeline a task or fence belongs to:
// either the single global timeline or the timeline of a specific
// (context, ring index) pair.
struct VirtioGpuRingGlobal {};
struct VirtioGpuRingContextSpecific {
    VirtioGpuCtxId mCtxId;
    VirtioGpuRingIdx mRingIdx;
};
using VirtioGpuRing = std::variant<VirtioGpuRingGlobal, VirtioGpuRingContextSpecific>;

template <>
struct std::hash<VirtioGpuRingGlobal> {
    std::size_t operator()(VirtioGpuRingGlobal const&) const noexcept { return 0; }
};

inline bool operator==(const VirtioGpuRingGlobal&, const VirtioGpuRingGlobal&) { return true; }

template <>
struct std::hash<VirtioGpuRingContextSpecific> {
    std::size_t operator()(VirtioGpuRingContextSpecific const& ringContextSpecific) const noexcept {
        std::size_t ctxHash = std::hash<VirtioGpuCtxId>{}(ringContextSpecific.mCtxId);
        std::size_t ringHash = std::hash<VirtioGpuRingIdx>{}(ringContextSpecific.mRingIdx);
        // Use the hash_combine from
        // https://www.boost.org/doc/libs/1_78_0/boost/container_hash/hash.hpp.
        std::size_t res = ctxHash;
        res ^= ringHash + 0x9e3779b9 + (res << 6) + (res >> 2);
        return res;
    }
};

inline bool operator==(const VirtioGpuRingContextSpecific& lhs,
                       const VirtioGpuRingContextSpecific& rhs) {
    return lhs.mCtxId == rhs.mCtxId && lhs.mRingIdx == rhs.mRingIdx;
}
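
// With both alternatives hashable and equality-comparable, std::hash<VirtioGpuRing>
// and operator== on the variant itself become available, so a VirtioGpuRing can be
// used directly as an unordered_map key (as mTimelineQueues does below).
// Illustrative sketch only; the map name is hypothetical:
//
//   std::unordered_map<VirtioGpuRing, int> perRingCounters;
//   perRingCounters[VirtioGpuRingGlobal{}]++;
//   perRingCounters[VirtioGpuRingContextSpecific{/*ctx*/ 1, /*ring*/ 0}]++;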

inline std::string to_string(const VirtioGpuRing& ring) {
    struct {
        std::string operator()(const VirtioGpuRingGlobal&) { return "global"; }
        std::string operator()(const VirtioGpuRingContextSpecific& ring) {
            std::stringstream ss;
            ss << "context specific {ctx = " << ring.mCtxId << ", ring = " << (int)ring.mRingIdx
               << "}";
            return ss.str();
        }
    } visitor;
    return std::visit(visitor, ring);
}

class VirtioGpuTimelines {
   public:
    using FenceId = uint64_t;
    using Ring = VirtioGpuRing;
    using TaskId = uint64_t;

    // Adds a pending task to the timeline of the given ring and returns its id.
    TaskId enqueueTask(const Ring&);
    // Adds a fence to the timeline of the given ring. Its callback is invoked
    // once every task enqueued before it on that ring has completed.
    void enqueueFence(const Ring&, FenceId, FenceCompletionCallback);
    // Marks a previously enqueued task as completed.
    void notifyTaskCompletion(TaskId);
    // Signals fences with no pending tasks ahead of them, across all timelines.
    void poll();
    static std::unique_ptr<VirtioGpuTimelines> create(bool withAsyncCallback);

   private:
    VirtioGpuTimelines(bool withAsyncCallback);
    struct Fence {
        FenceId mId;
        FenceCompletionCallback mCompletionCallback;
        Fence(FenceId id, FenceCompletionCallback completionCallback)
            : mId(id), mCompletionCallback(std::move(completionCallback)) {}
    };
    struct Task {
        TaskId mId;
        Ring mRing;
        std::atomic_bool mHasCompleted;
        Task(TaskId id, const Ring& ring) : mId(id), mRing(ring), mHasCompleted(false) {}
    };
    using TimelineItem =
        std::variant<std::unique_ptr<Fence>, std::shared_ptr<Task>>;
    android::base::Lock mLock;
    std::atomic<TaskId> mNextId;
    // The mTaskIdToTask cache must be destroyed after mTimelineQueues, the
    // actual owner of the Tasks, because the deleter of a Task automatically
    // removes its entry from mTaskIdToTask.
    std::unordered_map<TaskId, std::weak_ptr<Task>> mTaskIdToTask;
    std::unordered_map<Ring, std::list<TimelineItem>> mTimelineQueues;
    const bool mWithAsyncCallback;
    // Go over the timeline, signal any fences without pending tasks, and remove
    // timeline items that are no longer needed.
    void poll_locked(const Ring&);
};
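
// Illustrative usage sketch, not part of this header. The ids and the callback
// body below are hypothetical, and the callback is assumed to be compatible
// with FenceCompletionCallback from render-utils/virtio_gpu_ops.h:
//
//   auto timelines = VirtioGpuTimelines::create(/*withAsyncCallback=*/false);
//   VirtioGpuRing ring = VirtioGpuRingContextSpecific{/*ctx*/ 1, /*ring*/ 0};
//   auto taskId = timelines->enqueueTask(ring);
//   timelines->enqueueFence(ring, /*fenceId=*/42, [] { /* signal the guest */ });
//   // The fence is not signaled yet: a task enqueued before it is still pending.
//   timelines->notifyTaskCompletion(taskId);
//   // poll() walks the timelines and signals fences with no pending tasks ahead.
//   timelines->poll();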

#endif  // VIRTIO_GPU_TIMELINES_H