/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_AUDIO_STATE_QUEUE_H
#define ANDROID_AUDIO_STATE_QUEUE_H

#include <stdatomic.h>

// The state queue template class was originally driven by this use case / requirements:
//  There are two threads: a fast mixer, and a normal mixer, and they share state.
//  The interesting part of the shared state is a set of active fast tracks,
//  and the output HAL configuration (buffer size in frames, sample rate, etc.).
//  Fast mixer thread:
//      periodic with typical period < 10 ms
//      FIFO/RR scheduling policy and a low fixed priority
//      ok to block for bounded time using nanosleep() to achieve desired period
//      must not block on condition wait, mutex lock, atomic operation spin, I/O, etc.
//        under typical operations of mixing, writing, or adding/removing tracks
//      ok to block for unbounded time when the output HAL configuration changes,
//        and this may result in an audible artifact
//      needs read-only access to a recent stable state,
//        but not necessarily the most current one
//      only allocate and free memory when configuration changes
//      avoid conventional logging, as this is a form of I/O and could block
//      defer computation to other threads when feasible; for example
//        cycle times are collected by fast mixer thread but the floating-point
//        statistical calculations on these cycle times are computed by normal mixer
//      these requirements also apply to callouts such as AudioBufferProvider and VolumeProvider
//  Normal mixer thread:
//      periodic with typical period ~ 20 ms
//      SCHED_OTHER scheduling policy and nice priority == urgent audio
//      ok to block, but prefer to avoid as much as possible
//      needs read/write access to state
//  The normal mixer may need to temporarily suspend the fast mixer thread during mode changes.
//  It will do this using the state -- one of the fields tells the fast mixer to idle.

// Additional requirements:
//  - observer must always be able to poll for and view the latest pushed state; it must never be
//    blocked from seeing that state
//  - observer does not need to see every state in sequence; it is OK for it to skip states
//    [see below for more on this]
//  - mutator must always be able to read/modify a state; it must never be blocked from reading or
//    modifying state
//  - reduce memcpy where possible
//  - work well if the observer runs more frequently than the mutator,
//    as is the case with fast mixer/normal mixer.
//    It is not a requirement to work well if the roles were reversed,
//    and the mutator were to run more frequently than the observer.
//    In this case, the mutator could get blocked waiting for a slot to free up for
//    it to work with.
//    This could be solved somewhat by increasing the depth of the queue, but it would
//    still limit the mutator to a finite number of changes before it would block.  A future
//    possibility, not implemented here, would be to allow the mutator to safely overwrite an
//    already pushed state.  This could be done by the mutator overwriting mNext, but then being
//    prepared to read an mAck which is actually for the earlier mNext (since there is a race).

// Solution:
//  Let's call the fast mixer thread the "observer" and the normal mixer thread the "mutator".
//  We assume there is only a single observer and a single mutator; this is critical.
//  Each state is of type <T>, and should contain only POD (Plain Old Data) and raw pointers, as
//  memcpy() may be used to copy state, and the destructors are run in unpredictable order.
//  The states in chronological order are: previous, current, next, and mutating:
//      previous    read-only, observer can compare vs. current to see the subset that changed
//      current     read-only, this is the primary state for observer
//      next        read-only, when observer is ready to accept a new state it will shift it in:
//                      previous = current
//                      current = next
//                  and the slot formerly used by previous is now available to the mutator
//      mutating    invisible to observer, read/write to mutator
//  Initialization is tricky, especially for the observer.  If the observer starts execution
//  before the mutator, there are no previous, current, or next states.  And even if the observer
//  starts execution after the mutator, there is a next state but no previous or current states.
//  To solve this, we'll have the observer idle until there is a next state,
//  and it will have to deal with the case where there is no previous state.
//  The states are stored in a shared FIFO queue represented using a circular array.
//  The observer polls for mutations, and receives a new state pointer after a
//  mutation is pushed onto the queue.  To the observer, the state pointers are
//  effectively in random order, that is, the observer should not do address
//  arithmetic on the state pointers.  However, to the mutator, the state pointers
//  are in a definite circular order.
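//
//  A worked example of these roles (illustrative only; the four mStates[] slots declared
//  below are labelled A..D purely for this walkthrough):
//      start               mutator initializes and modifies A (mutating); observer polls,
//                          sees no next state yet, and idles
//      mutator push        A becomes next; the mutator continues mutating in B
//      observer poll       A becomes current (there is still no previous state)
//      mutator push        B becomes next; the mutator continues mutating in C
//      observer poll       A becomes previous, B becomes current
//  Note that previous, current, next, and mutating can all be distinct slots at the same
//  time, which is consistent with the requirement below that kN be at least 4.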

#include "Configuration.h"

namespace android {

#ifdef STATE_QUEUE_DUMP
// The StateQueueObserverDump and StateQueueMutatorDump keep
// a cache of StateQueue statistics that can be logged by dumpsys.
// Each individual native word-sized field is accessed atomically.  But the
// overall structure is non-atomic, that is, there may be an inconsistency between fields.
// No barriers or locks are used for either writing or reading.
// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
// It has a different lifetime than the StateQueue, and so it can't be a member of StateQueue.

struct StateQueueObserverDump {
    StateQueueObserverDump() : mStateChanges(0) { }
    /*virtual*/ ~StateQueueObserverDump() { }
    unsigned    mStateChanges;      // incremented each time poll() detects a state change
    void        dump(int fd);
};

struct StateQueueMutatorDump {
    StateQueueMutatorDump() : mPushDirty(0), mPushAck(0), mBlockedSequence(0) { }
    /*virtual*/ ~StateQueueMutatorDump() { }
    unsigned    mPushDirty;         // incremented each time push() is called with a dirty state
    unsigned    mPushAck;           // incremented each time push(BLOCK_UNTIL_ACKED) is called
    unsigned    mBlockedSequence;   // incremented before and after each time that push()
                                    // blocks for more than one PUSH_BLOCK_ACK_NS;
                                    // if odd, then mutator is currently blocked inside push()
    void        dump(int fd);
};
#endif

// manages a FIFO queue of states
template<typename T> class StateQueue {

public:
            StateQueue();
    virtual ~StateQueue();

    // Observer APIs

    // Poll for a state change.  Returns a pointer to a read-only state,
    // or NULL if the state has not been initialized yet.
    // If a new state has not been pushed by the mutator since the previous poll,
    // then the returned pointer will be unchanged.
    // The previous state pointer is guaranteed to still be valid;
    // this allows the observer to diff the previous and new states.
    const T* poll();

    // Mutator APIs

    // Begin a mutation.  Returns a pointer to a read/write state, except the
    // first time it is called the state is write-only and _must_ be initialized.
    // Mutations cannot be nested.
    // If the state is dirty and has not been pushed onto the state queue yet, then
    // this new mutation will be squashed together with the previous one.
    T*      begin();

    // End the current mutation and indicate whether the caller modified the state.
    // If didModify is true, then the state is marked dirty (in need of pushing).
    // There is no rollback option because modifications are done in place.
    // Does not automatically push the new state onto the state queue.
    void    end(bool didModify = true);

    // Push a new state, if any, out to the observer via the state queue.
    // For BLOCK_NEVER, returns:
    //      true if not dirty, or dirty and pushed successfully
    //      false if dirty and not pushed because that would block; remains dirty
    // For BLOCK_UNTIL_PUSHED and BLOCK_UNTIL_ACKED, always returns true.
    // No-op if there are no pending modifications (not dirty), except
    // for BLOCK_UNTIL_ACKED it will wait until a prior push has been acknowledged.
    // Must not be called in the middle of a mutation.
    enum block_t {
        BLOCK_NEVER,        // do not block
        BLOCK_UNTIL_PUSHED, // block until there's a slot available for the push
        BLOCK_UNTIL_ACKED,  // also block until the push is acknowledged by the observer
    };
    bool    push(block_t block = BLOCK_NEVER);

    // Return whether the current state is dirty (modified and not pushed).
    bool    isDirty() const { return mIsDirty; }

#ifdef STATE_QUEUE_DUMP
    // Register location of observer dump area
    void    setObserverDump(StateQueueObserverDump *dump)
            { mObserverDump = dump != NULL ? dump : &mObserverDummyDump; }

    // Register location of mutator dump area
    void    setMutatorDump(StateQueueMutatorDump *dump)
            { mMutatorDump = dump != NULL ? dump : &mMutatorDummyDump; }
#endif

private:
    static const unsigned kN = 4;       // values < 4 are not supported by this code
    T                 mStates[kN];      // written by mutator, read by observer

    // "volatile" is meaningless with SMP, but here it indicates that we're using atomic ops
    atomic_uintptr_t  mNext;            // written by mutator to advance next, read by observer
    volatile const T* mAck;             // written by observer to acknowledge advance of next,
                                        // read by mutator

    // only used by observer
    const T*          mCurrent;         // most recent value returned by poll()

    // only used by mutator
    T*                mMutating;        // where updates by mutator are done in place
    const T*          mExpecting;       // what the mutator expects mAck to be set to
    bool              mInMutation;      // whether we're currently in the middle of a mutation
    bool              mIsDirty;         // whether mutating state has been modified since last push
    bool              mIsInitialized;   // whether mutating state has been initialized yet

#ifdef STATE_QUEUE_DUMP
    StateQueueObserverDump  mObserverDummyDump; // default area for observer dump if not set
    StateQueueObserverDump* mObserverDump;      // pointer to active observer dump, always non-NULL
    StateQueueMutatorDump   mMutatorDummyDump;  // default area for mutator dump if not set
    StateQueueMutatorDump*  mMutatorDump;       // pointer to active mutator dump, always non-NULL
#endif

};  // class StateQueue
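
// The sketch below is illustrative only and is never compiled (note the #if 0).  The type
// ExampleState and the functions exampleMutator() / exampleObserver() are hypothetical names
// used solely to show the intended calling pattern on each side of the queue.
#if 0
struct ExampleState {               // state must be POD, as it may be copied with memcpy()
    int      mGeneration;
    unsigned mFrameCount;
};

// Mutator side (e.g. normal mixer thread): modify the state in place, then publish it.
static void exampleMutator(StateQueue<ExampleState> &queue)
{
    ExampleState *state = queue.begin();    // read/write; the first call must fully initialize
    state->mGeneration++;
    state->mFrameCount = 256;
    queue.end();                            // marks the state dirty, but does not publish it
    if (!queue.push(StateQueue<ExampleState>::BLOCK_NEVER)) {
        // still dirty; a later push() (for example on the next mixer cycle) can retry
    }
}

// Observer side (e.g. fast mixer thread): poll for the most recently pushed state.
static void exampleObserver(StateQueue<ExampleState> &queue, const ExampleState *&previous)
{
    const ExampleState *current = queue.poll();
    if (current == NULL) {
        return;                             // nothing pushed yet; stay idle
    }
    if (current != previous) {
        // a new state was pushed; 'previous' is still valid here, so the two can be diffed
        previous = current;
    }
}
#endif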

}   // namespace android

#endif  // ANDROID_AUDIO_STATE_QUEUE_H