• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
18 
19 // This is needed for stdint.h to define INT64_MAX in C++
20 #define __STDC_LIMIT_MACROS
21 
22 #include <math.h>
23 
24 #include <cutils/log.h>
25 
26 #include <ui/Fence.h>
27 
28 #include <utils/String8.h>
29 #include <utils/Thread.h>
30 #include <utils/Trace.h>
31 #include <utils/Vector.h>
32 
33 #include "DispSync.h"
34 #include "EventLog/EventLog.h"
35 
36 namespace android {
37 
// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool kTraceDetailedInfo = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware.  The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
// (sqrt(160000000000 ns^2) == 400000 ns, i.e. 400 usec RMS error.)
static const nsecs_t kErrorThreshold = 160000000000;    // 400 usec squared

// This is the offset from the present fence timestamps to the corresponding
// vsync event.
static const int64_t kPresentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;
51 
// DispSyncThread delivers the model-based ("software") vsync callbacks.
// It sleeps until the earliest predicted event time across all registered
// listeners, then fires every callback whose time has arrived.  The vsync
// model (period and phase) is pushed in via updateModel().
class DispSyncThread: public Thread {
public:

    DispSyncThread():
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mWakeupLatency(0) {
    }

    virtual ~DispSyncThread() {}

    // Install new model parameters and wake the thread so it can recompute
    // its next wakeup time.
    void updateModel(nsecs_t period, nsecs_t phase) {
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mCond.signal();
    }

    // Ask threadLoop to return false (i.e. exit) on its next iteration.
    void stop() {
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mStop) {
                    return false;
                }

                // No model yet: block until updateModel() or stop() signals.
                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    // Sleep until the next event; a signal on mCond (model
                    // change, listener add/remove, stop) wakes us early.
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                if (isWakeup) {
                    // Track how late the timed wait actually woke us as an
                    // exponential moving average (63/64 old, 1/64 new),
                    // clamped to 500 us so one bad wakeup can't skew it.
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    if (mWakeupLatency > 500000) {
                        // Don't correct by more than 500 us
                        mWakeupLatency = 500000;
                    }
                    if (kTraceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            // Fire callbacks outside the lock so listeners may call back
            // into DispSync (e.g. addEventListener) without deadlocking.
            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    // Register a callback to be fired at the given phase offset from the
    // model's vsync time.  Returns BAD_VALUE if already registered.
    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the firstmost future event to fire without
        // allowing any past events to fire.  Because
        // computeListenerNextEventTimeLocked filters out events within a half
        // a period of the last event time, we need to initialize the last
        // event time to a half a period in the past.
        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;

        mEventListeners.push(listener);

        // Wake the loop so the new listener's next event is considered.
        mCond.signal();

        return NO_ERROR;
    }

    // Unregister a previously added callback.  Returns BAD_VALUE if the
    // callback was not registered.
    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the kIgnorePresentFences case.
    bool hasAnyEventListeners() {
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    // A registered callback plus its phase offset and the time of the last
    // event delivered to it (used to suppress double-fires).
    struct EventListener {
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    // A callback that has been selected to fire, captured while holding the
    // lock so it can be invoked after the lock is released.
    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

    // Returns the earliest upcoming event time over all listeners.
    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        return nextEventTime;
    }

    // Collects every listener whose next event time (relative to one period
    // ago) has already passed, and advances its mLastEventTime.
    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t ref = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    ref);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

    // Computes the first event time for this listener strictly after ref,
    // on the grid defined by (mPhase + listener.mPhase) mod mPeriod.
    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t ref) {

        nsecs_t lastEventTime = listener.mLastEventTime;
        if (ref < lastEventTime) {
            ref = lastEventTime;
        }

        nsecs_t phase = mPhase + listener.mPhase;
        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;

        // Skip any event within half a period of the last one delivered, so
        // small phase adjustments can't cause a double-fire.
        if (t - listener.mLastEventTime < mPeriod / 2) {
            t += mPeriod;
        }

        return t;
    }

    // Invokes the gathered callbacks.  Must be called WITHOUT mMutex held.
    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    bool mStop;             // set by stop(); makes threadLoop return false

    nsecs_t mPeriod;        // model period (0 == no model yet)
    nsecs_t mPhase;         // model phase offset
    nsecs_t mWakeupLatency; // EMA of timed-wait oversleep, capped at 500 us

    Vector<EventListener> mEventListeners;

    Mutex mMutex;           // guards all state above
    Condition mCond;        // signaled on model change / listener change / stop
};
276 
277 class ZeroPhaseTracer : public DispSync::Callback {
278 public:
ZeroPhaseTracer()279     ZeroPhaseTracer() : mParity(false) {}
280 
onDispSyncEvent(nsecs_t)281     virtual void onDispSyncEvent(nsecs_t /*when*/) {
282         mParity = !mParity;
283         ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
284     }
285 
286 private:
287     bool mParity;
288 };
289 
DispSync()290 DispSync::DispSync() :
291         mRefreshSkipCount(0),
292         mThread(new DispSyncThread()) {
293 
294     mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);
295 
296     reset();
297     beginResync();
298 
299     if (kTraceDetailedInfo) {
300         // If we're not getting present fences then the ZeroPhaseTracer
301         // would prevent HW vsync event from ever being turned off.
302         // Even if we're just ignoring the fences, the zero-phase tracing is
303         // not needed because any time there is an event registered we will
304         // turn on the HW vsync events.
305         if (!kIgnorePresentFences) {
306             addEventListener(0, new ZeroPhaseTracer());
307         }
308     }
309 }
310 
~DispSync()311 DispSync::~DispSync() {}
312 
reset()313 void DispSync::reset() {
314     Mutex::Autolock lock(mMutex);
315 
316     mNumResyncSamples = 0;
317     mFirstResyncSample = 0;
318     mNumResyncSamplesSincePresent = 0;
319     resetErrorLocked();
320 }
321 
addPresentFence(const sp<Fence> & fence)322 bool DispSync::addPresentFence(const sp<Fence>& fence) {
323     Mutex::Autolock lock(mMutex);
324 
325     mPresentFences[mPresentSampleOffset] = fence;
326     mPresentTimes[mPresentSampleOffset] = 0;
327     mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
328     mNumResyncSamplesSincePresent = 0;
329 
330     for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
331         const sp<Fence>& f(mPresentFences[i]);
332         if (f != NULL) {
333             nsecs_t t = f->getSignalTime();
334             if (t < INT64_MAX) {
335                 mPresentFences[i].clear();
336                 mPresentTimes[i] = t + kPresentTimeOffset;
337             }
338         }
339     }
340 
341     updateErrorLocked();
342 
343     return mPeriod == 0 || mError > kErrorThreshold;
344 }
345 
beginResync()346 void DispSync::beginResync() {
347     Mutex::Autolock lock(mMutex);
348 
349     mNumResyncSamples = 0;
350 }
351 
addResyncSample(nsecs_t timestamp)352 bool DispSync::addResyncSample(nsecs_t timestamp) {
353     Mutex::Autolock lock(mMutex);
354 
355     size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
356     mResyncSamples[idx] = timestamp;
357 
358     if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
359         mNumResyncSamples++;
360     } else {
361         mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
362     }
363 
364     updateModelLocked();
365 
366     if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
367         resetErrorLocked();
368     }
369 
370     if (kIgnorePresentFences) {
371         // If we don't have the sync framework we will never have
372         // addPresentFence called.  This means we have no way to know whether
373         // or not we're synchronized with the HW vsyncs, so we just request
374         // that the HW vsync events be turned on whenever we need to generate
375         // SW vsync events.
376         return mThread->hasAnyEventListeners();
377     }
378 
379     return mPeriod == 0 || mError > kErrorThreshold;
380 }
381 
endResync()382 void DispSync::endResync() {
383 }
384 
addEventListener(nsecs_t phase,const sp<Callback> & callback)385 status_t DispSync::addEventListener(nsecs_t phase,
386         const sp<Callback>& callback) {
387 
388     Mutex::Autolock lock(mMutex);
389     return mThread->addEventListener(phase, callback);
390 }
391 
setRefreshSkipCount(int count)392 void DispSync::setRefreshSkipCount(int count) {
393     Mutex::Autolock lock(mMutex);
394     ALOGD("setRefreshSkipCount(%d)", count);
395     mRefreshSkipCount = count;
396     updateModelLocked();
397 }
398 
removeEventListener(const sp<Callback> & callback)399 status_t DispSync::removeEventListener(const sp<Callback>& callback) {
400     Mutex::Autolock lock(mMutex);
401     return mThread->removeEventListener(callback);
402 }
403 
setPeriod(nsecs_t period)404 void DispSync::setPeriod(nsecs_t period) {
405     Mutex::Autolock lock(mMutex);
406     mPeriod = period;
407     mPhase = 0;
408     mThread->updateModel(mPeriod, mPhase);
409 }
410 
getPeriod()411 nsecs_t DispSync::getPeriod() {
412     // lock mutex as mPeriod changes multiple times in updateModelLocked
413     Mutex::Autolock lock(mMutex);
414     return mPeriod;
415 }
416 
// Rebuilds the vsync model (mPeriod, mPhase) from the collected resync
// samples and pushes the result to the worker thread.  Requires mMutex.
void DispSync::updateModelLocked() {
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        // Period estimate: average of consecutive sample deltas, which
        // telescopes to (last - first) / (N - 1).
        nsecs_t durationSum = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
        }

        mPeriod = durationSum / (mNumResyncSamples - 1);

        // Phase estimate: circular mean.  Each sample's offset within the
        // period is mapped to an angle; averaging the unit vectors and
        // taking atan2 avoids wrap-around bias near the period boundary.
        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = 0; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx];
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples);
        sampleAvgY /= double(mNumResyncSamples);

        // Convert the mean angle back to nanoseconds within the period.
        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        if (mPhase < 0) {
            mPhase += mPeriod;
        }

        if (kTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase);
        }

        // Artificially inflate the period if requested.  This must happen
        // AFTER the phase computation, which uses the true period.
        mPeriod += mPeriod * mRefreshSkipCount;

        mThread->updateModel(mPeriod, mPhase);
    }
}
459 
updateErrorLocked()460 void DispSync::updateErrorLocked() {
461     if (mPeriod == 0) {
462         return;
463     }
464 
465     // Need to compare present fences against the un-adjusted refresh period,
466     // since they might arrive between two events.
467     nsecs_t period = mPeriod / (1 + mRefreshSkipCount);
468 
469     int numErrSamples = 0;
470     nsecs_t sqErrSum = 0;
471 
472     for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
473         nsecs_t sample = mPresentTimes[i];
474         if (sample > mPhase) {
475             nsecs_t sampleErr = (sample - mPhase) % period;
476             if (sampleErr > period / 2) {
477                 sampleErr -= period;
478             }
479             sqErrSum += sampleErr * sampleErr;
480             numErrSamples++;
481         }
482     }
483 
484     if (numErrSamples > 0) {
485         mError = sqErrSum / numErrSamples;
486     } else {
487         mError = 0;
488     }
489 
490     if (kTraceDetailedInfo) {
491         ATRACE_INT64("DispSync:Error", mError);
492     }
493 }
494 
resetErrorLocked()495 void DispSync::resetErrorLocked() {
496     mPresentSampleOffset = 0;
497     mError = 0;
498     for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
499         mPresentFences[i].clear();
500         mPresentTimes[i] = 0;
501     }
502 }
503 
computeNextRefresh(int periodOffset) const504 nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
505     Mutex::Autolock lock(mMutex);
506     nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
507     return (((now - mPhase) / mPeriod) + periodOffset + 1) * mPeriod + mPhase;
508 }
509 
// Appends a human-readable snapshot of the model state, resync samples,
// and present-fence history to `result` (for dumpsys).
void DispSync::dump(String8& result) const {
    Mutex::Autolock lock(mMutex);
    result.appendFormat("present fences are %s\n",
            kIgnorePresentFences ? "ignored" : "used");
    result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n",
            mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount);
    result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
    result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n",
            mError, sqrt(mError));
    result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n",
            mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    // NOTE(review): %zd is for ssize_t; mNumResyncSamples appears to be
    // size_t elsewhere in this file — confirm the declaration in the header.
    result.appendFormat("mNumResyncSamples: %zd (max %d)\n",
            mNumResyncSamples, MAX_RESYNC_SAMPLES);

    // Dump resync samples oldest-first, with deltas between neighbors.
    result.appendFormat("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            result.appendFormat("  %" PRId64 "\n", sampleTime);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 ")\n",
                    sampleTime, sampleTime - previous);
        }
        previous = sampleTime;
    }

    // Dump present samples oldest-first (offset by mPresentSampleOffset).
    result.appendFormat("mPresentFences / mPresentTimes [%d]:\n",
            NUM_PRESENT_SAMPLES);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    previous = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        // A cleared fence slot means its signal time was already harvested.
        bool signaled = mPresentFences[idx] == NULL;
        nsecs_t presentTime = mPresentTimes[idx];
        if (!signaled) {
            result.appendFormat("  [unsignaled fence]\n");
        } else if (presentTime == 0) {
            result.appendFormat("  0\n");
        } else if (previous == 0) {
            result.appendFormat("  %" PRId64 "  (%.3f ms ago)\n", presentTime,
                    (now - presentTime) / 1000000.0);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 " / %.3f)  (%.3f ms ago)\n",
                    presentTime, presentTime - previous,
                    (presentTime - previous) / (double) mPeriod,
                    (now - presentTime) / 1000000.0);
        }
        previous = presentTime;
    }

    result.appendFormat("current monotonic time: %" PRId64 "\n", now);
}
564 
565 } // namespace android
566