• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "FastThread"
18 //#define LOG_NDEBUG 0
19 
20 #define ATRACE_TAG ATRACE_TAG_AUDIO
21 
22 #include "Configuration.h"
23 #include <linux/futex.h>
24 #include <sys/syscall.h>
25 #include <audio_utils/clock.h>
26 #include <cutils/atomic.h>
27 #include <utils/Log.h>
28 #include <utils/Trace.h>
29 #include "FastThread.h"
30 #include "FastThreadDumpState.h"
31 #include "TypedLogger.h"
32 
33 #define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
34 #define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
35 #define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
36                                         // to wait for warmup
37 #define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
38 
39 namespace android {
40 
// Constructor. cycleMs/loadUs are ATRACE counter-name strings copied into
// fixed-size member buffers; the pointers need not outlive the constructor.
// Most pointer members start as NULL placeholders and are re-pointed by the
// subclass constructor (see per-member comments below).
FastThread::FastThread(const char *cycleMs, const char *loadUs) : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(NULL), mCurrent(NULL),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(NULL),
    mDumpState(NULL),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
    // mCycleMs(cycleMs)
    // mLoadUs(loadUs)
{
    // struct members can't be brace-initialized in the init list on this
    // toolchain (see commented-out initializers above), so zero them here.
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
    // Copy the trace counter names; strlcpy truncates safely if too long.
    strlcpy(mCycleMs, cycleMs, sizeof(mCycleMs));
    strlcpy(mLoadUs, loadUs, sizeof(mLoadUs));
}
86 
~FastThread()87 FastThread::~FastThread()
88 {
89 }
90 
// Main loop of the fast thread. Runs a fixed state machine:
//   INITIAL/HOT_IDLE -> short sleep; COLD_IDLE -> block on a futex until the
//   controller boosts priority and wakes us; EXIT -> return false (stop);
//   any subclass command -> onWork() once per cycle.
// Always returns false so Thread::_threadLoop() does not re-invoke us (see
// comment at the bottom: returning true would lock a mutex and risk priority
// inversion on this real-time thread).
bool FastThread::threadLoop()
{
    // LOGT now works even if tlNBLogWriter is nullptr, but we're considering changing that,
    // so this initialization permits a future change to remove the check for nullptr.
    tlNBLogWriter = mDummyNBLogWriter.get();
    for (;;) {

        // either nanosleep, sched_yield, or busy wait
        if (mSleepNs >= 0) {
            if (mSleepNs > 0) {
                ALOG_ASSERT(mSleepNs < 1000000000);
                const struct timespec req = {0, mSleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        mSleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            // FIXME &sInitial not available, should save address earlier
            //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial);
            next = mCurrent;
        }

        mCommand = next->mCommand;
        if (next != mCurrent) {

            // As soon as possible of learning of a new dump area, start using it
            mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
            tlNBLogWriter = next->mNBLogWriter != NULL ?
                    next->mNBLogWriter : mDummyNBLogWriter.get();
            setNBLogWriter(tlNBLogWriter); // This is used for debugging only

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
                if (mCommand & FastThreadState::IDLE) {
                    onIdle();
                    mOldTsValid = false;
#ifdef FAST_THREAD_STATISTICS
                    mOldLoadValid = false;
#endif
                    mIgnoreNextOverrun = true;
                }
                mPrevious = mCurrent;
            }
            mCurrent = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        mDumpState->mCommand = mCommand;

        // FIXME what does this comment mean?
        // << current, previous, command, dumpState >>

        switch (mCommand) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            mSleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (mCurrent->mColdGen != mColdGen) {
                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
                // android_atomic_dec returns the PREVIOUS value; if it was
                // <= 0 the controller hasn't signaled yet, so block on the
                // futex at the post-decrement value until it wakes us.
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                // Mask off SCHED_RESET_ON_FORK before checking the policy;
                // the controller is expected to have boosted us to real-time.
                int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost on time");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                mIsWarm = false;
                mMeasuredWarmupTs.tv_sec = 0;
                mMeasuredWarmupTs.tv_nsec = 0;
                mWarmupCycles = 0;
                mWarmupConsecutiveInRangeCycles = 0;
                mSleepNs = -1;
                mColdGen = mCurrent->mColdGen;
#ifdef FAST_THREAD_STATISTICS
                mBounds = 0;
                mFull = false;
#endif
                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
                mTimestampStatus = INVALID_OPERATION;
            } else {
                mSleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            // Any other command must be a subclass-defined one; anything else
            // is a programming error.
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (mCurrent != mPrevious) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            mPrevious = mCurrent;
#endif
        }

        // do work using current state here
        mAttemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            if (mOldTsValid) {
                // Elapsed time since previous cycle, normalized so 0 <= nsec < 1e9.
                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
                long nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!mIsWarm && mAttemptedWrite) {
                    mMeasuredWarmupTs.tv_sec += sec;
                    mMeasuredWarmupTs.tv_nsec += nsec;
                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
                        mMeasuredWarmupTs.tv_sec++;
                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++mWarmupCycles;
                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
                        ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-9);
                        ++mWarmupConsecutiveInRangeCycles;
                    } else {
                        ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-9);
                        mWarmupConsecutiveInRangeCycles = 0;
                    }
                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
                        mIsWarm = true;
                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
                        mDumpState->mWarmupCycles = mWarmupCycles;
                        const double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1e3) +
                                (mMeasuredWarmupTs.tv_nsec * 1e-6);
                        LOG_WARMUP_TIME(measuredWarmupMs);
                    }
                }
                // Once warm, classify this cycle as underrun / overrun / on-time
                // and adjust mSleepNs accordingly.
                mSleepNs = -1;
                if (mIsWarm) {
                    if (sec > 0 || nsec > mUnderrunNs) {
                        ATRACE_NAME("underrun");
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        mDumpState->mUnderruns++;
                        LOG_UNDERRUN(audio_utils_ns_from_timespec(&newTs));
                        mIgnoreNextOverrun = true;
                    } else if (nsec < mOverrunNs) {
                        if (mIgnoreNextOverrun) {
                            mIgnoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            mDumpState->mOverruns++;
                            LOG_OVERRUN(audio_utils_ns_from_timespec(&newTs));
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at a
                        //    rate < than mOverrunNs per buffer.
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        mSleepNs = mForceNs - nsec;
                    } else {
                        mIgnoreNextOverrun = false;
                    }
                }
#ifdef FAST_THREAD_STATISTICS
                if (mIsWarm) {
                    // advance the FIFO queue bounds
                    // mBounds packs two 16-bit indices: low half = newest open
                    // slot, high half = oldest closed slot; mSamplingN is a
                    // power of two so the mask yields the ring-buffer index.
                    size_t i = mBounds & (mDumpState->mSamplingN - 1);
                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
                    if (mFull) {
                        //mBounds += 0x10000;
                        // overflow-safe increment of the high (oldest) half
                        __builtin_add_overflow(mBounds, 0x10000, &mBounds);
                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
                        mFull = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000U; // unsigned to prevent signed overflow.
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (mOldLoadValid) {
                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000U; // unsigned to prevent signed overflow.
                            }
                        } else {
                            // first time through the loop
                            mOldLoadValid = true;
                        }
                        mOldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    mDumpState->mMonotonicNs[i] = monotonicNs;
                    LOG_WORK_TIME(monotonicNs);
                    mDumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    mDumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    mDumpState->mBounds = mBounds;
                    ATRACE_INT(mCycleMs, monotonicNs / 1000000);
                    ATRACE_INT(mLoadUs, loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                mOldTsValid = true;
                mSleepNs = mPeriodNs;
                mIgnoreNextOverrun = true;
            }
            mOldTs = newTs;
        } else {
            // monotonic clock is broken
            mOldTsValid = false;
            mSleepNs = mPeriodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}
375 
376 }   // namespace android
377