/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastThread"
//#define LOG_NDEBUG 0

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include "Configuration.h"
#include <linux/futex.h>
#include <sys/syscall.h>
#include <cutils/atomic.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include "FastThread.h"
#include "FastThreadDumpState.h"
#include "TypedLogger.h"

#define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
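                                        // kept just below 1 s so it is a valid
                                        // timespec.tv_nsec for nanosleep()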
#define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
#define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
                                        // to wait for warmup
#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup

namespace android {

FastThread::FastThread(const char *cycleMs, const char *loadUs) : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(NULL), mCurrent(NULL),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(NULL),
    mDumpState(NULL),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    // mDummyNBLogWriter
    mNBLogWriter(&mDummyNBLogWriter),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
    // mCycleMs(cycleMs)
    // mLoadUs(loadUs)
{
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
    strlcpy(mCycleMs, cycleMs, sizeof(mCycleMs));
    strlcpy(mLoadUs, loadUs, sizeof(mLoadUs));
}

FastThread::~FastThread()
{
}

bool FastThread::threadLoop()
{
    // LOGT now works even if tlNBLogWriter is nullptr, but we're considering changing that,
    // so this initialization permits a future change to remove the check for nullptr.
    tlNBLogWriter = &mDummyNBLogWriter;
    for (;;) {

        // either nanosleep, sched_yield, or busy wait
        if (mSleepNs >= 0) {
            if (mSleepNs > 0) {
                ALOG_ASSERT(mSleepNs < 1000000000);
                const struct timespec req = {0, mSleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        mSleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            // FIXME &sInitial not available, should save address earlier
            //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial);
            next = mCurrent;
        }

        mCommand = next->mCommand;
        if (next != mCurrent) {

            // As soon as we learn of a new dump area, start using it
            mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
            mNBLogWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &mDummyNBLogWriter;
            setNBLogWriter(mNBLogWriter);   // FastMixer informs its AudioMixer, FastCapture ignores
            tlNBLogWriter = mNBLogWriter;

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
                if (mCommand & FastThreadState::IDLE) {
                    onIdle();
                    mOldTsValid = false;
#ifdef FAST_THREAD_STATISTICS
                    mOldLoadValid = false;
#endif
                    mIgnoreNextOverrun = true;
                }
                mPrevious = mCurrent;
            }
            mCurrent = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        mDumpState->mCommand = mCommand;

        // FIXME what does this comment mean?
        // << current, previous, command, dumpState >>

        switch (mCommand) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            mSleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (mCurrent->mColdGen != mColdGen) {
                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
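                // android_atomic_dec() returns the value prior to the decrement, so the futex
                // word now equals old - 1.  FUTEX_WAIT_PRIVATE only blocks while the word still
                // holds that value, so a wake-up (increment + futex wake) by the normal mixer
                // that races ahead of us is not lost.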
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
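                // sched_getscheduler() returns the policy ORed with SCHED_RESET_ON_FORK when
                // that flag is set, so mask it off before checking for SCHED_FIFO / SCHED_RR.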
                int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost on time");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                mIsWarm = false;
                mMeasuredWarmupTs.tv_sec = 0;
                mMeasuredWarmupTs.tv_nsec = 0;
                mWarmupCycles = 0;
                mWarmupConsecutiveInRangeCycles = 0;
                mSleepNs = -1;
                mColdGen = mCurrent->mColdGen;
#ifdef FAST_THREAD_STATISTICS
                mBounds = 0;
                mFull = false;
#endif
                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
                mTimestampStatus = INVALID_OPERATION;
            } else {
                mSleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (mCurrent != mPrevious) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            mPrevious = mCurrent;
#endif
        }

        // do work using current state here
        mAttemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            if (mOldTsValid) {
                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
                long nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!mIsWarm && mAttemptedWrite) {
                    mMeasuredWarmupTs.tv_sec += sec;
                    mMeasuredWarmupTs.tv_nsec += nsec;
                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
                        mMeasuredWarmupTs.tv_sec++;
                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++mWarmupCycles;
                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
                        ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-6);
                        ++mWarmupConsecutiveInRangeCycles;
                    } else {
                        ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-6);
                        mWarmupConsecutiveInRangeCycles = 0;
                    }
                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
                        mIsWarm = true;
                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
                        mDumpState->mWarmupCycles = mWarmupCycles;
                    }
                }
                mSleepNs = -1;
                if (mIsWarm) {
                    if (sec > 0 || nsec > mUnderrunNs) {
                        ATRACE_NAME("underrun");
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        mDumpState->mUnderruns++;
                        mIgnoreNextOverrun = true;
                    } else if (nsec < mOverrunNs) {
                        if (mIgnoreNextOverrun) {
                            mIgnoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            mDumpState->mOverruns++;
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls buffers
                        //    faster than one per mOverrunNs
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        mSleepNs = mForceNs - nsec;
                    } else {
                        mIgnoreNextOverrun = false;
                    }
                }
#ifdef FAST_THREAD_STATISTICS
                if (mIsWarm) {
                    // advance the FIFO queue bounds
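                    // mBounds packs two 16-bit counters: the low half is where the next sample
                    // goes (newest) and the high half is the oldest valid sample; both are masked
                    // with (mSamplingN - 1) when indexing the circular arrays.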
                    size_t i = mBounds & (mDumpState->mSamplingN - 1);
                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
                    if (mFull) {
                        mBounds += 0x10000;
                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
                        mFull = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                    uint32_t monotonicNs = nsec;
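                    // fold whole seconds into the 32-bit value only while the sum still fits in
                    // a uint32_t (sec < 4); for longer gaps only the fractional part is kept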
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000;
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (mOldLoadValid) {
                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000;
                            }
                        } else {
                            // first time through the loop
                            mOldLoadValid = true;
                        }
                        mOldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
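                    // pack the CPU number into the low 4 bits so the dump code can tell which
                    // core each frequency sample came from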
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    mDumpState->mMonotonicNs[i] = monotonicNs;
                    mDumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    mDumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    mDumpState->mBounds = mBounds;
                    ATRACE_INT(mCycleMs, monotonicNs / 1000000);
                    ATRACE_INT(mLoadUs, loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                mOldTsValid = true;
                mSleepNs = mPeriodNs;
                mIgnoreNextOverrun = true;
            }
            mOldTs = newTs;
        } else {
            // monotonic clock is broken
            mOldTsValid = false;
            mSleepNs = mPeriodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}

}   // namespace android