// Copyright 2021 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef VIRTIO_GPU_TIMELINES_H
#define VIRTIO_GPU_TIMELINES_H

#include <atomic>
#include <functional>
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <variant>

#include "aemu/base/synchronization/Lock.h"
#include "virtio-gpu-gfxstream-renderer.h"
#include "render-utils/virtio_gpu_ops.h"

struct VirtioGpuRingGlobal {};
struct VirtioGpuRingContextSpecific {
    VirtioGpuCtxId mCtxId;
    VirtioGpuRingIdx mRingIdx;
};
using VirtioGpuRing = std::variant<VirtioGpuRingGlobal, VirtioGpuRingContextSpecific>;

template <>
struct std::hash<VirtioGpuRingGlobal> {
    std::size_t operator()(VirtioGpuRingGlobal const&) const noexcept { return 0; }
};

inline bool operator==(const VirtioGpuRingGlobal&, const VirtioGpuRingGlobal&) { return true; }

template <>
struct std::hash<VirtioGpuRingContextSpecific> {
    std::size_t operator()(VirtioGpuRingContextSpecific const& ringContextSpecific) const noexcept {
        std::size_t ctxHash = std::hash<VirtioGpuCtxId>{}(ringContextSpecific.mCtxId);
        std::size_t ringHash = std::hash<VirtioGpuRingIdx>{}(ringContextSpecific.mRingIdx);
        // Use the hash_combine from
        // https://www.boost.org/doc/libs/1_78_0/boost/container_hash/hash.hpp.
        std::size_t res = ctxHash;
        res ^= ringHash + 0x9e3779b9 + (res << 6) + (res >> 2);
        return res;
    }
};

inline bool operator==(const VirtioGpuRingContextSpecific& lhs,
                       const VirtioGpuRingContextSpecific& rhs) {
    return lhs.mCtxId == rhs.mCtxId && lhs.mRingIdx == rhs.mRingIdx;
}
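
// Note: the std::hash specializations and operator== overloads above make both
// ring alternatives hashable and comparable, which is what enables
// std::hash<VirtioGpuRing> (via std::hash<std::variant>) and therefore
// unordered_map containers keyed on a Ring, such as mTimelineQueues below.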

inline std::string to_string(const VirtioGpuRing& ring) {
    struct {
        std::string operator()(const VirtioGpuRingGlobal&) { return "global"; }
        std::string operator()(const VirtioGpuRingContextSpecific& ring) {
            std::stringstream ss;
            ss << "context specific {ctx = " << ring.mCtxId << ", ring = " << ring.mRingIdx << "}";
            return ss.str();
        }
    } visitor;
    return std::visit(visitor, ring);
}
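// Illustrative examples (ctxId and ringIdx are placeholder values; the exact
// formatting follows the visitor above):
//   to_string(VirtioGpuRing{VirtioGpuRingGlobal{}});  // -> "global"
//   to_string(VirtioGpuRing{VirtioGpuRingContextSpecific{ctxId, ringIdx}});
//       // -> "context specific {ctx = <ctxId>, ring = <ringIdx>}"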

class VirtioGpuTimelines {
   public:
    using FenceId = uint64_t;
    using Ring = VirtioGpuRing;
    using TaskId = uint64_t;

    TaskId enqueueTask(const Ring&);
    void enqueueFence(const Ring&, FenceId, FenceCompletionCallback);
    void notifyTaskCompletion(TaskId);
    void poll();
    static std::unique_ptr<VirtioGpuTimelines> create(bool withAsyncCallback);

   private:
    VirtioGpuTimelines(bool withAsyncCallback);
    struct Fence {
        std::unique_ptr<FenceCompletionCallback> mCompletionCallback;
        Fence(FenceCompletionCallback completionCallback)
            : mCompletionCallback(std::make_unique<FenceCompletionCallback>(
                  completionCallback)) {}
    };
    struct Task {
        TaskId mId;
        Ring mRing;
        std::atomic_bool mHasCompleted;
        Task(TaskId id, const Ring& ring) : mId(id), mRing(ring), mHasCompleted(false) {}
    };
    using TimelineItem =
        std::variant<std::unique_ptr<Fence>, std::shared_ptr<Task>>;
    android::base::Lock mLock;
    std::atomic<TaskId> mNextId;
    // The mTaskIdToTask cache must be destroyed after mTimelineQueues, the
    // actual owner of each Task, because the custom deleter of Task removes
    // the corresponding entry from mTaskIdToTask. Members are destroyed in
    // reverse declaration order, so mTaskIdToTask must be declared before
    // mTimelineQueues.
    std::unordered_map<TaskId, std::weak_ptr<Task>> mTaskIdToTask;
    std::unordered_map<Ring, std::list<TimelineItem>> mTimelineQueues;
    const bool mWithAsyncCallback;
    // Go over the timeline, signal any fences without pending tasks, and remove
    // timeline items that are no longer needed.
    void poll_locked(const Ring&);
};
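
// Example usage: a minimal sketch, not part of this header's contract. It
// assumes FenceCompletionCallback is a zero-argument callable (consistent with
// how it is stored above) and that a fence completes once every task enqueued
// before it on the same ring has completed.
//
//   auto timelines = VirtioGpuTimelines::create(/*withAsyncCallback=*/false);
//   VirtioGpuTimelines::Ring ring =
//       VirtioGpuRingContextSpecific{/*mCtxId=*/1, /*mRingIdx=*/0};
//
//   auto taskId = timelines->enqueueTask(ring);
//   timelines->enqueueFence(ring, /*fenceId=*/42,
//                           [] { /* notify the guest that fence 42 completed */ });
//
//   timelines->notifyTaskCompletion(taskId);
//   // With withAsyncCallback == false, completed fences are signaled from
//   // poll() rather than from notifyTaskCompletion():
//   timelines->poll();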

#endif  // VIRTIO_GPU_TIMELINES_H