/*
 * Copyright 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#undef LOG_TAG
#define LOG_TAG "VSyncReactor"
//#define LOG_NDEBUG 0
#include "VSyncReactor.h"
#include <cutils/properties.h>
#include <log/log.h>
#include <utils/Trace.h>
#include "../TracedOrdinal.h"
#include "TimeKeeper.h"
#include "VSyncDispatch.h"
#include "VSyncTracker.h"

namespace android::scheduler {
using base::StringAppendF;

Clock::~Clock() = default;
nsecs_t SystemClock::now() const {
    return systemTime(SYSTEM_TIME_MONOTONIC);
}

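// Toggles a traced boolean ("VSYNC-predicted") at every predicted vsync so the prediction can be
// inspected in systrace. Only instantiated when the debug.sf.show_predicted_vsync property is set.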
class PredictedVsyncTracer {
public:
    PredictedVsyncTracer(VSyncDispatch& dispatch)
          : mRegistration(dispatch,
                          std::bind(&PredictedVsyncTracer::callback, this, std::placeholders::_1,
                                    std::placeholders::_2),
                          "PredictedVsyncTracer") {
        mRegistration.schedule(0, 0);
    }

private:
    TracedOrdinal<bool> mParity = {"VSYNC-predicted", 0};
    VSyncCallbackRegistration mRegistration;

    void callback(nsecs_t /*vsyncTime*/, nsecs_t /*targetWakeupTime*/) {
        mParity = !mParity;
        mRegistration.schedule(0, 0);
    }
};

VSyncReactor::VSyncReactor(std::unique_ptr<Clock> clock, std::unique_ptr<VSyncDispatch> dispatch,
                           std::unique_ptr<VSyncTracker> tracker, size_t pendingFenceLimit,
                           bool supportKernelIdleTimer)
      : mClock(std::move(clock)),
        mTracker(std::move(tracker)),
        mDispatch(std::move(dispatch)),
        mPendingLimit(pendingFenceLimit),
        mPredictedVsyncTracer(property_get_bool("debug.sf.show_predicted_vsync", false)
                                      ? std::make_unique<PredictedVsyncTracer>(*mDispatch)
                                      : nullptr),
        mSupportKernelIdleTimer(supportKernelIdleTimer) {}

VSyncReactor::~VSyncReactor() = default;

// The DispSync interface has a 'repeat this callback at rate' semantic. This object adapts
// VSyncDispatch's individually-scheduled callbacks so as to meet DispSync's existing semantic
// for now.
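// Illustratively: a client that asked DispSync to "call me every period, offset X after vsync"
// keeps getting periodic callbacks here because each fired callback immediately schedules the
// next one-shot dispatch (see callback() below).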
class CallbackRepeater {
public:
    CallbackRepeater(VSyncDispatch& dispatch, DispSync::Callback* cb, const char* name,
                     nsecs_t period, nsecs_t offset, nsecs_t notBefore)
          : mName(name),
            mCallback(cb),
            mRegistration(dispatch,
                          std::bind(&CallbackRepeater::callback, this, std::placeholders::_1,
                                    std::placeholders::_2),
                          mName),
            mPeriod(period),
            mOffset(offset),
            mLastCallTime(notBefore) {}

    ~CallbackRepeater() {
        std::lock_guard<std::mutex> lk(mMutex);
        mRegistration.cancel();
    }

    void start(nsecs_t offset) {
        std::lock_guard<std::mutex> lk(mMutex);
        mStopped = false;
        mOffset = offset;

        auto const schedule_result = mRegistration.schedule(calculateWorkload(), mLastCallTime);
        LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
                            "Error scheduling callback: rc %X", schedule_result);
    }

    void setPeriod(nsecs_t period) {
        std::lock_guard<std::mutex> lk(mMutex);
        if (period == mPeriod) {
            return;
        }
        mPeriod = period;
    }

    void stop() {
        std::lock_guard<std::mutex> lk(mMutex);
        LOG_ALWAYS_FATAL_IF(mStopped, "DispSyncInterface misuse: callback already stopped");
        mStopped = true;
        mRegistration.cancel();
    }

    void dump(std::string& result) const {
        std::lock_guard<std::mutex> lk(mMutex);
        StringAppendF(&result, "\t%s: mPeriod=%.2f last vsync time %.2fms relative to now (%s)\n",
                      mName.c_str(), mPeriod / 1e6f, (mLastCallTime - systemTime()) / 1e6f,
                      mStopped ? "stopped" : "running");
    }

private:
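    // The lock is released around onDispSyncEvent() below, so the client callback never runs
    // while mMutex is held; only the bookkeeping before and after runs under the lock.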
    void callback(nsecs_t vsynctime, nsecs_t wakeupTime) {
        {
            std::lock_guard<std::mutex> lk(mMutex);
            mLastCallTime = vsynctime;
        }

        mCallback->onDispSyncEvent(wakeupTime, vsynctime);

        {
            std::lock_guard<std::mutex> lk(mMutex);
            if (mStopped) {
                return;
            }
            auto const schedule_result = mRegistration.schedule(calculateWorkload(), vsynctime);
            LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
                                "Error rescheduling callback: rc %X", schedule_result);
        }
    }

    // DispSync offsets are defined as the time after the vsync that precedes presentation.
    // VSyncReactor workloads are defined as the time before the intended presentation vsync.
    // Note the change in sign between the two definitions.
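    // Illustrative numbers only: with a period of ~16.7ms and an offset of 1ms after vsync,
    // the workload is ~15.7ms, i.e. the callback is scheduled ~15.7ms ahead of the target vsync.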
    nsecs_t calculateWorkload() REQUIRES(mMutex) { return mPeriod - mOffset; }

    const std::string mName;
    DispSync::Callback* const mCallback;

    std::mutex mutable mMutex;
    VSyncCallbackRegistration mRegistration GUARDED_BY(mMutex);
    bool mStopped GUARDED_BY(mMutex) = false;
    nsecs_t mPeriod GUARDED_BY(mMutex);
    nsecs_t mOffset GUARDED_BY(mMutex);
    nsecs_t mLastCallTime GUARDED_BY(mMutex);
};

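// Feeds a present fence's signal time to the tracker as a vsync sample; fences that are still
// pending are parked in mUnfiredFences and consumed on a later call. Returns true while the
// vsync model still needs more samples (or while the fence could not be consumed).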
bool VSyncReactor::addPresentFence(const std::shared_ptr<FenceTime>& fence) {
    if (!fence) {
        return false;
    }

    nsecs_t const signalTime = fence->getCachedSignalTime();
    if (signalTime == Fence::SIGNAL_TIME_INVALID) {
        return true;
    }

    std::lock_guard<std::mutex> lk(mMutex);
    if (mExternalIgnoreFences || mInternalIgnoreFences) {
        return true;
    }

    bool timestampAccepted = true;
    for (auto it = mUnfiredFences.begin(); it != mUnfiredFences.end();) {
        auto const time = (*it)->getCachedSignalTime();
        if (time == Fence::SIGNAL_TIME_PENDING) {
            it++;
        } else if (time == Fence::SIGNAL_TIME_INVALID) {
            it = mUnfiredFences.erase(it);
        } else {
            timestampAccepted &= mTracker->addVsyncTimestamp(time);

            it = mUnfiredFences.erase(it);
        }
    }

    if (signalTime == Fence::SIGNAL_TIME_PENDING) {
        if (mPendingLimit == mUnfiredFences.size()) {
            mUnfiredFences.erase(mUnfiredFences.begin());
        }
        mUnfiredFences.push_back(fence);
    } else {
        timestampAccepted &= mTracker->addVsyncTimestamp(signalTime);
    }

    if (!timestampAccepted) {
        mMoreSamplesNeeded = true;
        setIgnorePresentFencesInternal(true);
        mPeriodConfirmationInProgress = true;
    }

    return mMoreSamplesNeeded;
}

void VSyncReactor::setIgnorePresentFences(bool ignoration) {
    std::lock_guard<std::mutex> lk(mMutex);
    mExternalIgnoreFences = ignoration;
    updateIgnorePresentFencesInternal();
}

void VSyncReactor::setIgnorePresentFencesInternal(bool ignoration) {
    mInternalIgnoreFences = ignoration;
    updateIgnorePresentFencesInternal();
}

void VSyncReactor::updateIgnorePresentFencesInternal() {
    if (mExternalIgnoreFences || mInternalIgnoreFences) {
        mUnfiredFences.clear();
    }
}

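// Illustratively: computeNextRefresh(0, now) is the next anticipated vsync from now, and a
// positive periodOffset looks that many whole periods further ahead.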
nsecs_t VSyncReactor::computeNextRefresh(int periodOffset, nsecs_t now) const {
    auto const currentPeriod = periodOffset ? mTracker->currentPeriod() : 0;
    return mTracker->nextAnticipatedVSyncTimeFrom(now + periodOffset * currentPeriod);
}

nsecs_t VSyncReactor::expectedPresentTime(nsecs_t now) {
    return mTracker->nextAnticipatedVSyncTimeFrom(now);
}

void VSyncReactor::startPeriodTransition(nsecs_t newPeriod) {
    ATRACE_CALL();
    mPeriodConfirmationInProgress = true;
    mPeriodTransitioningTo = newPeriod;
    mMoreSamplesNeeded = true;
    setIgnorePresentFencesInternal(true);
}

void VSyncReactor::endPeriodTransition() {
    ATRACE_CALL();
    mPeriodTransitioningTo.reset();
    mPeriodConfirmationInProgress = false;
    mLastHwVsync.reset();
}

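// A new period is not applied immediately: it starts a transition that must be confirmed by HW
// vsync samples (see periodConfirmed) before the tracker and callback repeaters are updated.
// When the kernel idle timer is supported, even an unchanged period re-enters confirmation.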
void VSyncReactor::setPeriod(nsecs_t period) {
    ATRACE_INT64("VSR-setPeriod", period);
    std::lock_guard lk(mMutex);
    mLastHwVsync.reset();

    if (!mSupportKernelIdleTimer && period == getPeriod()) {
        endPeriodTransition();
        setIgnorePresentFencesInternal(false);
        mMoreSamplesNeeded = false;
    } else {
        startPeriodTransition(period);
    }
}

nsecs_t VSyncReactor::getPeriod() {
    return mTracker->currentPeriod();
}

void VSyncReactor::beginResync() {
    mTracker->resetModel();
}

void VSyncReactor::endResync() {}

bool VSyncReactor::periodConfirmed(nsecs_t vsync_timestamp, std::optional<nsecs_t> HwcVsyncPeriod) {
    if (!mPeriodConfirmationInProgress) {
        return false;
    }

    if (!mLastHwVsync && !HwcVsyncPeriod) {
        return false;
    }

    const bool periodIsChanging =
            mPeriodTransitioningTo && (*mPeriodTransitioningTo != getPeriod());
    if (mSupportKernelIdleTimer && !periodIsChanging) {
        // Clear out the Composer-provided period and use the allowance logic below
        HwcVsyncPeriod = {};
    }

    auto const period = mPeriodTransitioningTo ? *mPeriodTransitioningTo : getPeriod();
    static constexpr int allowancePercent = 10;
    static constexpr std::ratio<allowancePercent, 100> allowancePercentRatio;
    auto const allowance = period * allowancePercentRatio.num / allowancePercentRatio.den;
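    // Illustrative numbers only: for a ~16.67ms (60Hz) period the allowance is ~1.67ms, so the
    // reported period, or the distance between consecutive HW vsyncs, must land within 10% of
    // the expected period to be considered confirmed.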
    if (HwcVsyncPeriod) {
        return std::abs(*HwcVsyncPeriod - period) < allowance;
    }

    auto const distance = vsync_timestamp - *mLastHwVsync;
    return std::abs(distance - period) < allowance;
}

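// Feeds a HW vsync timestamp into the reactor. *periodFlushed is set to true only when a pending
// period transition has just been confirmed and committed to the tracker and callback repeaters.
// Returns true while more HW vsync samples are needed.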
bool VSyncReactor::addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
                                   bool* periodFlushed) {
    assert(periodFlushed);

    std::lock_guard<std::mutex> lk(mMutex);
    if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
        ATRACE_NAME("VSR: period confirmed");
        if (mPeriodTransitioningTo) {
            mTracker->setPeriod(*mPeriodTransitioningTo);
            for (auto& entry : mCallbacks) {
                entry.second->setPeriod(*mPeriodTransitioningTo);
            }
            *periodFlushed = true;
        }

        if (mLastHwVsync) {
            mTracker->addVsyncTimestamp(*mLastHwVsync);
        }
        mTracker->addVsyncTimestamp(timestamp);

        endPeriodTransition();
        mMoreSamplesNeeded = mTracker->needsMoreSamples();
    } else if (mPeriodConfirmationInProgress) {
        ATRACE_NAME("VSR: still confirming period");
        mLastHwVsync = timestamp;
        mMoreSamplesNeeded = true;
        *periodFlushed = false;
    } else {
        ATRACE_NAME("VSR: adding sample");
        *periodFlushed = false;
        mTracker->addVsyncTimestamp(timestamp);
        mMoreSamplesNeeded = mTracker->needsMoreSamples();
    }

    if (!mMoreSamplesNeeded) {
        setIgnorePresentFencesInternal(false);
    }
    return mMoreSamplesNeeded;
}

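// Registers (or restarts) a repeating callback at the given phase offset. At most four listeners
// are supported; an existing registration for the same callback is reused and restarted with the
// new phase.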
status_t VSyncReactor::addEventListener(const char* name, nsecs_t phase,
                                        DispSync::Callback* callback,
                                        nsecs_t /* lastCallbackTime */) {
    std::lock_guard<std::mutex> lk(mMutex);
    auto it = mCallbacks.find(callback);
    if (it == mCallbacks.end()) {
        // TODO (b/146557561): resolve lastCallbackTime semantics in DispSync i/f.
        static auto constexpr maxListeners = 4;
        if (mCallbacks.size() >= maxListeners) {
            ALOGE("callback %s not added, exceeded callback limit of %i (currently %zu)", name,
                  maxListeners, mCallbacks.size());
            return NO_MEMORY;
        }

        auto const period = mTracker->currentPeriod();
        auto repeater = std::make_unique<CallbackRepeater>(*mDispatch, callback, name, period,
                                                           phase, mClock->now());
        it = mCallbacks.emplace(std::pair(callback, std::move(repeater))).first;
    }

    it->second->start(phase);
    return NO_ERROR;
}

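// Stops the repeater but leaves its entry in mCallbacks, so the same callback can later be
// restarted via addEventListener or changePhaseOffset.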
status_t VSyncReactor::removeEventListener(DispSync::Callback* callback,
                                           nsecs_t* /* outLastCallback */) {
    std::lock_guard<std::mutex> lk(mMutex);
    auto const it = mCallbacks.find(callback);
    LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback %p not registered", callback);

    it->second->stop();
    return NO_ERROR;
}

status_t VSyncReactor::changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) {
    std::lock_guard<std::mutex> lk(mMutex);
    auto const it = mCallbacks.find(callback);
    LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback %p was not registered", callback);

    it->second->start(phase);
    return NO_ERROR;
}

void VSyncReactor::dump(std::string& result) const {
    std::lock_guard<std::mutex> lk(mMutex);
    StringAppendF(&result, "VsyncReactor in use\n");
    StringAppendF(&result, "Has %zu unfired fences\n", mUnfiredFences.size());
    StringAppendF(&result, "mInternalIgnoreFences=%d mExternalIgnoreFences=%d\n",
                  mInternalIgnoreFences, mExternalIgnoreFences);
    StringAppendF(&result, "mMoreSamplesNeeded=%d mPeriodConfirmationInProgress=%d\n",
                  mMoreSamplesNeeded, mPeriodConfirmationInProgress);
    if (mPeriodTransitioningTo) {
        StringAppendF(&result, "mPeriodTransitioningTo=%" PRId64 "\n", *mPeriodTransitioningTo);
    } else {
        StringAppendF(&result, "mPeriodTransitioningTo=nullptr\n");
    }

    if (mLastHwVsync) {
        StringAppendF(&result, "Last HW vsync was %.2fms ago\n",
                      (mClock->now() - *mLastHwVsync) / 1e6f);
    } else {
        StringAppendF(&result, "No Last HW vsync\n");
    }

    StringAppendF(&result, "CallbackRepeaters:\n");
    for (const auto& [callback, repeater] : mCallbacks) {
        repeater->dump(result);
    }

    StringAppendF(&result, "VSyncTracker:\n");
    mTracker->dump(result);
    StringAppendF(&result, "VSyncDispatch:\n");
    mDispatch->dump(result);
}

void VSyncReactor::reset() {}

} // namespace android::scheduler