/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastThread"
//#define LOG_NDEBUG 0

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include "Configuration.h"
#include <linux/futex.h>
#include <sys/syscall.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include "FastThread.h"
#include "FastThreadDumpState.h"

#define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
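                                        // (kept just below one second: the value is passed to
                                        // nanosleep() as a timespec tv_nsec, which must be
                                        // less than 1000000000)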
#define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
#define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
                                        // to wait for warmup
#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup

namespace android {

FastThread::FastThread() : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(NULL), mCurrent(NULL),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(NULL),
    mDumpState(NULL),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    // mDummyLogWriter
    mLogWriter(&mDummyLogWriter),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
{
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
}

FastThread::~FastThread()
{
}

bool FastThread::threadLoop()
{
    for (;;) {

        // Wait according to mSleepNs: > 0 means nanosleep() for that many nanoseconds,
        // 0 means sched_yield(), and < 0 means proceed immediately (busy wait).
        if (mSleepNs >= 0) {
            if (mSleepNs > 0) {
                ALOG_ASSERT(mSleepNs < 1000000000);
                const struct timespec req = {0, mSleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        mSleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            // FIXME &sInitial not available, should save address earlier
            //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial);
            next = mCurrent;
        }

        mCommand = next->mCommand;
        if (next != mCurrent) {

            // As soon as we learn of a new dump area, start using it
            mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
            mLogWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &mDummyLogWriter;
            setLog(mLogWriter);

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
                if (mCommand & FastThreadState::IDLE) {
                    onIdle();
                    mOldTsValid = false;
#ifdef FAST_THREAD_STATISTICS
                    mOldLoadValid = false;
#endif
                    mIgnoreNextOverrun = true;
                }
                mPrevious = mCurrent;
            }
            mCurrent = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        mDumpState->mCommand = mCommand;

        // FIXME what does this comment mean?
        // << current, previous, command, dumpState >>

        switch (mCommand) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            mSleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (mCurrent->mColdGen != mColdGen) {
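                // Cold-idle handshake with the controlling (normal) thread:
                // android_atomic_dec() returns the value of the futex word prior to the
                // decrement.  If that value was <= 0, no wake-up has been posted yet, so
                // block in FUTEX_WAIT_PRIVATE.  Passing old - 1 (the word's new value) as
                // the expected value lets the kernel return immediately if the waker has
                // already incremented the word, avoiding a lost wake-up.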
                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                int policy = sched_getscheduler(0);
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                mIsWarm = false;
                mMeasuredWarmupTs.tv_sec = 0;
                mMeasuredWarmupTs.tv_nsec = 0;
                mWarmupCycles = 0;
                mWarmupConsecutiveInRangeCycles = 0;
                mSleepNs = -1;
                mColdGen = mCurrent->mColdGen;
#ifdef FAST_THREAD_STATISTICS
                mBounds = 0;
                mFull = false;
#endif
                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
                mTimestampStatus = INVALID_OPERATION;
            } else {
                mSleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (mCurrent != mPrevious) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            mPrevious = mCurrent;
#endif
        }

        // do work using current state here
        mAttemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            //mLogWriter->logTimestamp(newTs);
            if (mOldTsValid) {
                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
                long nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) went backwards: was %ld.%09ld but now %ld.%09ld",
                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
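                // For illustration, assuming a nominal 5 ms period and a warmup window of
                // [2 ms, 10 ms] (mWarmupNsMin and mWarmupNsMax are set by the subclass):
                // warmup ends after 2 consecutive cycles measured inside the window, or
                // unconditionally after 10 cycles.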
                if (!mIsWarm && mAttemptedWrite) {
                    mMeasuredWarmupTs.tv_sec += sec;
                    mMeasuredWarmupTs.tv_nsec += nsec;
                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
                        mMeasuredWarmupTs.tv_sec++;
                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++mWarmupCycles;
                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
243 ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-9);
244 ++mWarmupConsecutiveInRangeCycles;
245 } else {
246 ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-9);
247 mWarmupConsecutiveInRangeCycles = 0;
                    }
                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
                        mIsWarm = true;
                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
                        mDumpState->mWarmupCycles = mWarmupCycles;
                    }
                }
                mSleepNs = -1;
                if (mIsWarm) {
                    if (sec > 0 || nsec > mUnderrunNs) {
                        ATRACE_NAME("underrun");
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        mDumpState->mUnderruns++;
                        mIgnoreNextOverrun = true;
                    } else if (nsec < mOverrunNs) {
                        if (mIgnoreNextOverrun) {
                            mIgnoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            mDumpState->mOverruns++;
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at a
                        //    rate of less than mOverrunNs per buffer
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        mSleepNs = mForceNs - nsec;     // pad this cycle out to a total of mForceNs
                    } else {
                        mIgnoreNextOverrun = false;
                    }
                }
#ifdef FAST_THREAD_STATISTICS
                if (mIsWarm) {
                    // advance the FIFO queue bounds
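                    // mBounds packs two 16-bit ring indices: the low half is the "newest
                    // open" index (the next slot to write) and the high half is the "oldest
                    // closed" index (the oldest valid sample).  A reader snapshots mBounds
                    // and compares the two halves to learn how many samples are valid.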
                    size_t i = mBounds & (mDumpState->mSamplingN - 1);
                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
                    if (mFull) {
                        mBounds += 0x10000;
                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
                        mFull = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000;
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (mOldLoadValid) {
                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000;
                            }
                        } else {
                            // first time through the loop
                            mOldLoadValid = true;
                        }
                        mOldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
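                    // Pack the CPU number into the low 4 bits so each sample records the
                    // core it was taken on; the frequency occupies the upper bits.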
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    mDumpState->mMonotonicNs[i] = monotonicNs;
                    mDumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    mDumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    mDumpState->mBounds = mBounds;
                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
                    ATRACE_INT("load_us", loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                mOldTsValid = true;
                mSleepNs = mPeriodNs;
                mIgnoreNextOverrun = true;
            }
            mOldTs = newTs;
        } else {
            // monotonic clock is broken
            mOldTsValid = false;
            mSleepNs = mPeriodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() would then lock a mutex, which can
    // result in priority inversion
}

}   // namespace android
