• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #define LOG_TAG "FastThread"
18 //#define LOG_NDEBUG 0
19 
20 #define ATRACE_TAG ATRACE_TAG_AUDIO
21 
22 #include "Configuration.h"
23 #include <linux/futex.h>
24 #include <sys/syscall.h>
25 #include <audio_utils/clock.h>
26 #include <cutils/atomic.h>
27 #include <utils/Log.h>
28 #include <utils/Trace.h>
29 #include "FastThread.h"
30 #include "FastThreadDumpState.h"
31 #include <afutils/TypedLogger.h>
32 
33 #define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
34 #define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
35 #define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
36                                         // to wait for warmup
37 #define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
38 
39 namespace android {
40 
FastThread(const char * cycleMs,const char * loadUs)41 FastThread::FastThread(const char *cycleMs, const char *loadUs) : Thread(false /*canCallJava*/)
42 {
43     strlcpy(mCycleMs, cycleMs, sizeof(mCycleMs));
44     strlcpy(mLoadUs, loadUs, sizeof(mLoadUs));
45 }
46 
// Main loop of a "fast" audio thread; runs until an EXIT command is received.
// Each iteration:
//   1. sleeps / yields / proceeds immediately as decided by the previous cycle,
//   2. poll()s the state queue for a new FastThreadState,
//   3. dispatches the state's command (idle, exit, or a subclass command),
//   4. performs one unit of work via onWork(),
//   5. measures cycle timing for warmup tracking, under/overrun detection, and
//      (when FAST_THREAD_STATISTICS is defined) dumpsys statistics.
// Subclass hook points visible here: poll(), onIdle(), onExit(),
// onStateChange(), onWork(), isSubClassCommand(), setNBLogWriter().
bool FastThread::threadLoop()
{
    // LOGT now works even if tlNBLogWriter is nullptr, but we're considering changing that,
    // so this initialization permits a future change to remove the check for nullptr.
    aflog::setThreadWriter(mDummyNBLogWriter.get());
    for (;;) {

        // either nanosleep, sched_yield, or busy wait
        // (mSleepNs < 0 means proceed immediately: no sleep and no yield)
        if (mSleepNs >= 0) {
            if (mSleepNs > 0) {
                ALOG_ASSERT(mSleepNs < 1000000000);
                const struct timespec req = {
                    0, // tv_sec
                    static_cast<long>(mSleepNs) // NOLINT(google-runtime-int)
                };
                nanosleep(&req, nullptr);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        mSleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == nullptr) {
            // continue to use the default initial state until a real state is available
            // FIXME &sInitial not available, should save address earlier
            //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial);
            next = mCurrent;
        }

        mCommand = next->mCommand;
        if (next != mCurrent) {

            // As soon as possible of learning of a new dump area, start using it
            mDumpState = next->mDumpState != nullptr ? next->mDumpState : mDummyDumpState;
            NBLog::Writer * const writer = next->mNBLogWriter != nullptr ?
                    next->mNBLogWriter : mDummyNBLogWriter.get();
            aflog::setThreadWriter(writer);
            setNBLogWriter(writer); // This is used for debugging only

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
                if (mCommand & FastThreadState::IDLE) {
                    // entering idle: invalidate timing baselines so they are
                    // re-established when real work resumes
                    onIdle();
                    mOldTsValid = false;
#ifdef FAST_THREAD_STATISTICS
                    mOldLoadValid = false;
#endif
                    mIgnoreNextOverrun = true;
                }
                mPrevious = mCurrent;
            }
            mCurrent = next;
        }
#if !LOG_NDEBUG
        next = nullptr;    // not referenced again
#endif

        mDumpState->mCommand = mCommand;

        // FIXME what does this comment mean?
        // << current, previous, command, dumpState >>

        switch (mCommand) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            // hot idle: sleep only briefly so we can resume work quickly
            mSleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (mCurrent->mColdGen != mColdGen) {
                // Block on the cold futex until the controlling thread wakes us,
                // i.e. until there is work to do again.
                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != nullptr);
                const int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    // android_atomic_dec returns the PREVIOUS value, so old - 1 is
                    // the value we just stored; FUTEX_WAIT only blocks while the
                    // futex word still equals that value.
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, nullptr);
                }
                // mask off SCHED_RESET_ON_FORK before comparing the scheduling policy
                const int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost on time");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                mIsWarm = false;
                mMeasuredWarmupTs.tv_sec = 0;
                mMeasuredWarmupTs.tv_nsec = 0;
                mWarmupCycles = 0;
                mWarmupConsecutiveInRangeCycles = 0;
                mSleepNs = -1;  // run the first post-wakeup cycle without sleeping
                mColdGen = mCurrent->mColdGen;
#ifdef FAST_THREAD_STATISTICS
                mBounds = 0;
                mFull = false;
#endif
                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
                mTimestampStatus = INVALID_OPERATION;
            } else {
                // this cold-idle generation was already handled; behave as hot idle
                mSleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            // returning false terminates Thread::_threadLoop()
            return false;
        default:
            // any other command must be one the subclass understands; fatal otherwise
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (mCurrent != mPrevious) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            mPrevious = mCurrent;
#endif
        }

        // do work using current state here
        // (the subclass's onWork() is expected to set mAttemptedWrite as appropriate;
        //  it is cleared here first — TODO confirm against subclass implementations)
        mAttemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            if (mOldTsValid) {
                // elapsed wall-clock time since the previous cycle
                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
                auto nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    // normalize: borrow one second
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!mIsWarm && mAttemptedWrite) {
                    // accumulate total measured warmup time (normalized timespec)
                    mMeasuredWarmupTs.tv_sec += sec;
                    mMeasuredWarmupTs.tv_nsec += nsec;
                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
                        mMeasuredWarmupTs.tv_sec++;
                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++mWarmupCycles;
                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
                        ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-9);
                        ++mWarmupConsecutiveInRangeCycles;
                    } else {
                        ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-9);
                        mWarmupConsecutiveInRangeCycles = 0;
                    }
                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
                        // warmup complete: publish the measurements to the dump area
                        mIsWarm = true;
                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
                        mDumpState->mWarmupCycles = mWarmupCycles;
                        const double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1e3) +
                                (mMeasuredWarmupTs.tv_nsec * 1e-6);
                        LOG_WARMUP_TIME(measuredWarmupMs);
                    }
                }
                // default: next cycle runs immediately, unless overrun logic below
                // schedules a forced minimum cycle time
                mSleepNs = -1;
                if (mIsWarm) {
                    if (sec > 0 || nsec > mUnderrunNs) {
                        ATRACE_NAME("underrun");   // NOLINT(misc-const-correctness)
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        mDumpState->mUnderruns++;
                        LOG_UNDERRUN(audio_utils_ns_from_timespec(&newTs));
                        // the cycle after an underrun tends to be short; don't count
                        // it as an overrun
                        mIgnoreNextOverrun = true;
                    } else if (nsec < mOverrunNs) {
                        if (mIgnoreNextOverrun) {
                            mIgnoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            mDumpState->mOverruns++;
                            LOG_OVERRUN(audio_utils_ns_from_timespec(&newTs));
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at a
                        //    rate < than mOverrunNs per buffer.
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        mSleepNs = mForceNs - nsec;
                    } else {
                        mIgnoreNextOverrun = false;
                    }
                }
#ifdef FAST_THREAD_STATISTICS
                if (mIsWarm) {
                    // advance the FIFO queue bounds
                    // (mSamplingN is presumably a power of two so the mask below wraps
                    //  the index — TODO confirm where mSamplingN is set)
                    const size_t i = mBounds & (mDumpState->mSamplingN - 1);
                    // low 16 bits = newest-open half, high 16 bits = oldest-closed half
                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
                    if (mFull) {
                        //mBounds += 0x10000;
                        // overflow-safe advance of the high half
                        __builtin_add_overflow(mBounds, 0x10000, &mBounds);
                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
                        // the ring has wrapped for the first time
                        mFull = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000U; // unsigned to prevent signed overflow.
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (mOldLoadValid) {
                            // note: sec/nsec are reused here for the CPU-time delta
                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000U; // unsigned to prevent signed overflow.
                            }
                        } else {
                            // first time through the loop
                            mOldLoadValid = true;
                        }
                        mOldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
                    // pack frequency and CPU number into one word for the dump FIFO
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    mDumpState->mMonotonicNs[i] = monotonicNs;
                    LOG_WORK_TIME(monotonicNs);
                    mDumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    mDumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    mDumpState->mBounds = mBounds;
                    // publish this cycle's timing as atrace counters
                    ATRACE_INT(mCycleMs, monotonicNs / 1000000);
                    ATRACE_INT(mLoadUs, loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                mOldTsValid = true;
                mSleepNs = mPeriodNs;
                mIgnoreNextOverrun = true;
            }
            mOldTs = newTs;
        } else {
            // monotonic clock is broken
            mOldTsValid = false;
            mSleepNs = mPeriodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}
335 
336 }   // namespace android
337