/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "async_manager.h"

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <chrono>
#include <condition_variable>
#include <cstring>
#include <map>
#include <mutex>
#include <set>
#include <thread>
#include <vector>

26 #include "fcntl.h"
27 #include "os/log.h"
28 #include "sys/select.h"
29 #include "unistd.h"
30 
31 namespace test_vendor_lib {
// The implementation of AsyncManager is divided between two classes (three if
// AsyncManager itself is counted), but AsyncManager's only responsibility
// besides acting as a proxy for the other two classes is to provide a global
// synchronization mechanism for callbacks and client code to use.

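// An illustrative sketch of that synchronization mechanism (not part of the
// implementation; the client-side names such as shared_state are
// hypothetical): callbacks and client code that touch shared state can
// funnel their critical sections through Synchronize().
//
//   AsyncManager manager;
//   manager.Synchronize([&shared_state]() {
//     // Runs while holding the manager's synchronization mutex.
//     shared_state.Update();
//   });
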
// The watching of file descriptors is done through AsyncFdWatcher. Several
// objects of this class may coexist simultaneously since they share no state.
// After construction of one of these objects nothing happens beyond some very
// simple member initialization. When the first FD is set up for watching, the
// object starts a new thread which watches the given (and any later provided)
// FDs using select() inside a loop. A special FD (a pipe) is also watched; it
// is used to notify the thread of changes to the object's internal state (like
// the addition of new FDs to watch). Every access to internal state is
// synchronized using a single internal mutex. The thread is only stopped on
// destruction of the object, by modifying a flag, which is the only member
// variable accessed without acquiring the lock (the notification to the
// thread is done afterwards by writing to the pipe, so the thread will be
// notified regardless of which phase of the loop it is in at that moment).

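// An illustrative sketch of how a client watches a descriptor (not part of
// the implementation; socket_fd and the callback body are hypothetical):
//
//   AsyncManager manager;
//   manager.WatchFdForNonBlockingReads(socket_fd, [](int fd) {
//     // Called on the watcher thread whenever fd has data ready to read.
//   });
//   ...
//   manager.StopWatchingFileDescriptor(socket_fd);
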
// The scheduling of asynchronous tasks, periodic or not, is handled by the
// AsyncTaskManager class. Like the FD watcher, this class shares no internal
// state between different instances, so it is safe to use several objects of
// it. Nothing interesting happens upon construction either; work starts only
// after a Task has been scheduled, and access to internal state is
// synchronized using a single internal mutex. When the first task is scheduled
// a thread is started which monitors a queue of tasks. The queue is peeked to
// see when the next task should be carried out and then the thread performs an
// (absolute) timed wait on a condition variable. The wait ends because of
// either a timeout or a notify on the cond var: the former means a task is due
// for execution while the latter means there has been a change in internal
// state, such as a task having been scheduled/canceled or the stop flag having
// been set. Setting and querying the stop flag, or modifying the task queue
// and then notifying the cond var, is done atomically (i.e. while holding the
// lock on the internal mutex) to ensure that the thread never misses the
// notification, since notifying a cond var is not persistent the way writing
// to a pipe is. (If not done this way, the thread could query the stop flag
// and be put aside by the OS scheduler right after; the 'stop thread'
// procedure could then run, setting the flag, notifying a cond var that no
// one is waiting on and joining the thread; the thread would then resume
// execution believing that it needs to continue, and wait on the cond var
// possibly forever if no tasks are scheduled, effectively causing a
// deadlock.)

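// An illustrative sketch of task scheduling (not part of the implementation;
// the delays, the names user and beacon, and the callback bodies are
// hypothetical):
//
//   AsyncManager manager;
//   AsyncUserId user = manager.GetNextUserId();
//   AsyncTaskId beacon = manager.ExecAsyncPeriodically(
//       user, std::chrono::milliseconds(0), std::chrono::milliseconds(100),
//       []() { /* runs on the task thread every 100 ms */ });
//   manager.ExecAsync(user, std::chrono::milliseconds(500),
//                     []() { /* runs once, roughly 500 ms from now */ });
//   manager.CancelAsyncTask(beacon);
//   manager.CancelAsyncTasksFromUser(user);  // cancels any remaining tasks
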
// This number also states the maximum number of scheduled tasks we can handle
// at a given time.
static const uint16_t kMaxTaskId = -1; /* 2^16 - 1, permissible ids are {1..2^16-1} */
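// Returns the id to use for the next scheduled task, wrapping around after
// kMaxTaskId so that 0 is never handed out (permissible ids start at 1).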
static inline AsyncTaskId NextAsyncTaskId(const AsyncTaskId id) {
  return (id == kMaxTaskId) ? 1 : id + 1;
}
// The buffer is only 10 bytes because the expected number of bytes
// written on this pipe is 1. It is possible that the thread is notified
// more than once, but that is highly unlikely, so a buffer of size 10 seems
// enough and the reads are performed inside a while loop just in case it
// isn't. From the thread routine's point of view it is the same to have been
// notified just once or 100 times, so it simply tries to consume the entire
// buffer. In the case where an interrupt causes read to return before
// everything available has been read, a new iteration of the thread loop will
// bring execution back to this point almost immediately, so there is no need
// to treat that case specially.
static const int kNotificationBufferSize = 10;

// Async File Descriptor Watcher Implementation:
class AsyncManager::AsyncFdWatcher {
 public:
  int WatchFdForNonBlockingReads(int file_descriptor, const ReadCallback& on_read_fd_ready_callback) {
    // add file descriptor and callback
    {
      std::unique_lock<std::mutex> guard(internal_mutex_);
      watched_shared_fds_[file_descriptor] = on_read_fd_ready_callback;
    }

    // start the thread if not started yet
    int started = tryStartThread();
    if (started != 0) {
      LOG_ERROR("%s: Unable to start thread", __func__);
      return started;
    }

    // notify the thread so that it knows of the new FD
    notifyThread();

    return 0;
  }

  void StopWatchingFileDescriptor(int file_descriptor) {
    std::unique_lock<std::mutex> guard(internal_mutex_);
    watched_shared_fds_.erase(file_descriptor);
  }

  AsyncFdWatcher() = default;
  AsyncFdWatcher(const AsyncFdWatcher&) = delete;
  AsyncFdWatcher& operator=(const AsyncFdWatcher&) = delete;

  ~AsyncFdWatcher() = default;

  int stopThread() {
    if (!std::atomic_exchange(&running_, false)) {
      return 0;  // if not running already
    }

    notifyThread();

    if (std::this_thread::get_id() != thread_.get_id()) {
      thread_.join();
    } else {
      LOG_WARN("%s: Starting thread stop from inside the reading thread itself", __func__);
    }

    {
      std::unique_lock<std::mutex> guard(internal_mutex_);
      watched_shared_fds_.clear();
    }

    return 0;
  }

 private:
  // Make sure to call this with at least one file descriptor ready to be
  // watched upon or the thread routine will return immediately.
  int tryStartThread() {
    if (std::atomic_exchange(&running_, true)) {
      return 0;  // if already running
    }
    // set up the communication channel
    int pipe_fds[2];
    if (pipe2(pipe_fds, O_NONBLOCK)) {
      LOG_ERROR(
          "%s:Unable to establish a communication channel to the reading "
          "thread",
          __func__);
      return -1;
    }
    notification_listen_fd_ = pipe_fds[0];
    notification_write_fd_ = pipe_fds[1];

    thread_ = std::thread([this]() { ThreadRoutine(); });
    if (!thread_.joinable()) {
      LOG_ERROR("%s: Unable to start reading thread", __func__);
      return -1;
    }
    return 0;
  }

  int notifyThread() {
    char buffer = '0';
    if (TEMP_FAILURE_RETRY(write(notification_write_fd_, &buffer, 1)) < 0) {
      LOG_ERROR("%s: Unable to send message to reading thread", __func__);
      return -1;
    }
    return 0;
  }

  int setUpFileDescriptorSet(fd_set& read_fds) {
    // add comm channel to the set
    FD_SET(notification_listen_fd_, &read_fds);
    int nfds = notification_listen_fd_;

    // add watched FDs to the set
    {
      std::unique_lock<std::mutex> guard(internal_mutex_);
      for (auto& fdp : watched_shared_fds_) {
        FD_SET(fdp.first, &read_fds);
        nfds = std::max(fdp.first, nfds);
      }
    }
    return nfds;
  }

  // check the comm channel and read everything there
  bool consumeThreadNotifications(fd_set& read_fds) {
    if (FD_ISSET(notification_listen_fd_, &read_fds)) {
      char buffer[kNotificationBufferSize];
      while (TEMP_FAILURE_RETRY(read(notification_listen_fd_, buffer, kNotificationBufferSize)) ==
             kNotificationBufferSize) {
      }
      return true;
    }
    return false;
  }

  // check all file descriptors and call callbacks if necessary
  void runAppropriateCallbacks(fd_set& read_fds) {
    // not a good idea to call a callback while holding the FD lock,
    // nor to release the lock while traversing the map
    std::vector<decltype(watched_shared_fds_)::value_type> fds;
    {
      std::unique_lock<std::mutex> guard(internal_mutex_);
      for (auto& fdc : watched_shared_fds_) {
        if (FD_ISSET(fdc.first, &read_fds)) {
          fds.push_back(fdc);
        }
      }
    }
    for (auto& p : fds) {
      p.second(p.first);
    }
  }

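  // Main loop of the reading thread: build the FD set, block in select(),
  // drain any pipe notifications, and then dispatch the read callbacks.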
  void ThreadRoutine() {
    while (running_) {
      fd_set read_fds;
      FD_ZERO(&read_fds);
      int nfds = setUpFileDescriptorSet(read_fds);

      // wait until there is data available to read on some FD
      int retval = select(nfds + 1, &read_fds, NULL, NULL, NULL);
      if (retval <= 0) {  // there was some error or a timeout
        LOG_ERROR(
            "%s: There was an error while waiting for data on the file "
            "descriptors: %s",
            __func__, strerror(errno));
        continue;
      }

      consumeThreadNotifications(read_fds);

      // Do not read if there was a call to stop running
      if (!running_) {
        break;
      }

      runAppropriateCallbacks(read_fds);
    }
  }

  std::atomic_bool running_{false};
  std::thread thread_;
  std::mutex internal_mutex_;

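  // Map from watched file descriptor to the callback to run when it is
  // ready for a non-blocking read.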
  std::map<int, ReadCallback> watched_shared_fds_;

  // A pair of FDs used to send information to the reading thread
  int notification_listen_fd_{};
  int notification_write_fd_{};
};

// Async task manager implementation
class AsyncManager::AsyncTaskManager {
 public:
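  // Returns a fresh user id that scheduled tasks can be grouped under.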
  AsyncUserId GetNextUserId() { return lastUserId_++; }

  AsyncTaskId ExecAsync(AsyncUserId user_id, std::chrono::milliseconds delay,
                        const TaskCallback& callback) {
    return scheduleTask(std::make_shared<Task>(
        std::chrono::steady_clock::now() + delay, callback, user_id));
  }

  AsyncTaskId ExecAsyncPeriodically(AsyncUserId user_id,
                                    std::chrono::milliseconds delay,
                                    std::chrono::milliseconds period,
                                    const TaskCallback& callback) {
    return scheduleTask(std::make_shared<Task>(
        std::chrono::steady_clock::now() + delay, period, callback, user_id));
  }

  bool CancelAsyncTask(AsyncTaskId async_task_id) {
    // remove task from queue (and task id association) while holding lock
    std::unique_lock<std::mutex> guard(internal_mutex_);
    return cancel_task_with_lock_held(async_task_id);
  }

  bool CancelAsyncTasksFromUser(AsyncUserId user_id) {
    // remove tasks from the queue (and task id associations) while holding lock
    std::unique_lock<std::mutex> guard(internal_mutex_);
    if (tasks_by_user_id_.count(user_id) == 0) {
      return false;
    }
    for (auto task : tasks_by_user_id_[user_id]) {
      cancel_task_with_lock_held(task);
    }
    tasks_by_user_id_.erase(user_id);
    return true;
  }

  AsyncTaskManager() = default;
  AsyncTaskManager(const AsyncTaskManager&) = delete;
  AsyncTaskManager& operator=(const AsyncTaskManager&) = delete;

  ~AsyncTaskManager() = default;

  int stopThread() {
    {
      std::unique_lock<std::mutex> guard(internal_mutex_);
      tasks_by_id_.clear();
      task_queue_.clear();
      if (!running_) {
        return 0;
      }
      running_ = false;
      // notify the thread
      internal_cond_var_.notify_one();
    }  // release the lock before joining a thread that is likely waiting for it
    if (std::this_thread::get_id() != thread_.get_id()) {
      thread_.join();
    } else {
      LOG_WARN("%s: Starting thread stop from inside the task thread itself", __func__);
    }
    return 0;
  }

 private:
  // Holds the data for each task
  class Task {
   public:
    Task(std::chrono::steady_clock::time_point time,
         std::chrono::milliseconds period, const TaskCallback& callback,
         AsyncUserId user)
        : time(time),
          periodic(true),
          period(period),
          callback(callback),
          task_id(kInvalidTaskId),
          user_id(user) {}
    Task(std::chrono::steady_clock::time_point time,
         const TaskCallback& callback, AsyncUserId user)
        : time(time),
          periodic(false),
          callback(callback),
          task_id(kInvalidTaskId),
          user_id(user) {}

    // Operators needed to be in a collection
    bool operator<(const Task& another) const {
      return std::make_pair(time, task_id) < std::make_pair(another.time, another.task_id);
    }

    bool isPeriodic() const {
      return periodic;
    }

    // These fields should no longer be public if the class ever becomes
    // public or gets more complex
    std::chrono::steady_clock::time_point time;
    bool periodic;
    std::chrono::milliseconds period{};
    TaskCallback callback;
    AsyncTaskId task_id;
    AsyncUserId user_id;
  };

  // A comparator class to put shared pointers to tasks in an ordered set
  struct task_p_comparator {
    bool operator()(const std::shared_ptr<Task>& t1, const std::shared_ptr<Task>& t2) const {
      return *t1 < *t2;
    }
  };

  bool cancel_task_with_lock_held(AsyncTaskId async_task_id) {
    if (tasks_by_id_.count(async_task_id) == 0) {
      return false;
    }
    task_queue_.erase(tasks_by_id_[async_task_id]);
    tasks_by_id_.erase(async_task_id);
    return true;
  }

  AsyncTaskId scheduleTask(const std::shared_ptr<Task>& task) {
    {
      std::unique_lock<std::mutex> guard(internal_mutex_);
      // no more room for new tasks, we need a larger type for IDs
      if (tasks_by_id_.size() == kMaxTaskId)  // TODO potentially type unsafe
        return kInvalidTaskId;
      do {
        lastTaskId_ = NextAsyncTaskId(lastTaskId_);
      } while (isTaskIdInUse(lastTaskId_));
      task->task_id = lastTaskId_;
      // add task to the queue and map
      tasks_by_id_[lastTaskId_] = task;
      tasks_by_user_id_[task->user_id].insert(task->task_id);
      task_queue_.insert(task);
    }
    // start thread if necessary
    int started = tryStartThread();
    if (started != 0) {
      LOG_ERROR("%s: Unable to start thread", __func__);
      return kInvalidTaskId;
    }
    // notify the thread so that it knows of the new task
    internal_cond_var_.notify_one();
    // return task id
    return task->task_id;
  }

  bool isTaskIdInUse(const AsyncTaskId& task_id) const {
    return tasks_by_id_.count(task_id) != 0;
  }

  int tryStartThread() {
    // need the lock because of the running flag and the cond var
    std::unique_lock<std::mutex> guard(internal_mutex_);
    // check that the thread is not yet running
    if (running_) {
      return 0;
    }
    // start the thread
    running_ = true;
    thread_ = std::thread([this]() { ThreadRoutine(); });
    if (!thread_.joinable()) {
      LOG_ERROR("%s: Unable to start task thread", __func__);
      return -1;
    }
    return 0;
  }

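  // Main loop of the task thread: run the task at the head of the queue if it
  // is due (re-inserting it with an updated time if periodic), then wait on
  // the cond var until the next task is due or a notification arrives.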
  void ThreadRoutine() {
    while (running_) {
      TaskCallback callback;
      bool run_it = false;
      {
        std::unique_lock<std::mutex> guard(internal_mutex_);
        if (!task_queue_.empty()) {
          std::shared_ptr<Task> task_p = *(task_queue_.begin());
          if (task_p->time < std::chrono::steady_clock::now()) {
            run_it = true;
            callback = task_p->callback;
            task_queue_.erase(task_p);  // need to remove and add again if
                                        // periodic to update order
            if (task_p->isPeriodic()) {
              task_p->time += task_p->period;
              task_queue_.insert(task_p);
            } else {
              tasks_by_user_id_[task_p->user_id].erase(task_p->task_id);
              tasks_by_id_.erase(task_p->task_id);
            }
          }
        }
      }
      if (run_it) {
        callback();
      }
      {
        std::unique_lock<std::mutex> guard(internal_mutex_);
        // check for termination right before waiting
        if (!running_) break;
        // wait until time for the next task (if any)
        if (task_queue_.size() > 0) {
          // Make a copy of the time_point because wait_until takes a reference
          // to it and may read it after waiting, by which time the task may
          // have been freed (e.g. via CancelAsyncTask).
          std::chrono::steady_clock::time_point time =
              (*task_queue_.begin())->time;
          internal_cond_var_.wait_until(guard, time);
        } else {
          internal_cond_var_.wait(guard);
        }
      }
    }
  }

  bool running_ = false;
  std::thread thread_;
  std::mutex internal_mutex_;
  std::condition_variable internal_cond_var_;

  AsyncTaskId lastTaskId_ = kInvalidTaskId;
  AsyncUserId lastUserId_{1};
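  // Tasks indexed by id, grouped by user id, and ordered by the time at
  // which they should next run.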
  std::map<AsyncTaskId, std::shared_ptr<Task>> tasks_by_id_;
  std::map<AsyncUserId, std::set<AsyncTaskId>> tasks_by_user_id_;
  std::set<std::shared_ptr<Task>, task_p_comparator> task_queue_;
};

// Async Manager Implementation:
AsyncManager::AsyncManager() : fdWatcher_p_(new AsyncFdWatcher()), taskManager_p_(new AsyncTaskManager()) {}

AsyncManager::~AsyncManager() {
  // Make sure the threads are stopped before destroying the object.
  // The threads need to be stopped here and not in each internal class'
  // destructor because unique_ptr's reset() first assigns nullptr to the
  // pointer and only then calls the destructor, so any callback running
  // on these threads would dereference a null pointer if they called a member
  // function of this class.
  fdWatcher_p_->stopThread();
  taskManager_p_->stopThread();
}

int AsyncManager::WatchFdForNonBlockingReads(int file_descriptor, const ReadCallback& on_read_fd_ready_callback) {
  return fdWatcher_p_->WatchFdForNonBlockingReads(file_descriptor, on_read_fd_ready_callback);
}

void AsyncManager::StopWatchingFileDescriptor(int file_descriptor) {
  fdWatcher_p_->StopWatchingFileDescriptor(file_descriptor);
}

AsyncUserId AsyncManager::GetNextUserId() {
  return taskManager_p_->GetNextUserId();
}

AsyncTaskId AsyncManager::ExecAsync(AsyncUserId user_id,
                                    std::chrono::milliseconds delay,
                                    const TaskCallback& callback) {
  return taskManager_p_->ExecAsync(user_id, delay, callback);
}

AsyncTaskId AsyncManager::ExecAsyncPeriodically(
    AsyncUserId user_id, std::chrono::milliseconds delay,
    std::chrono::milliseconds period, const TaskCallback& callback) {
  return taskManager_p_->ExecAsyncPeriodically(user_id, delay, period,
                                               callback);
}

bool AsyncManager::CancelAsyncTask(AsyncTaskId async_task_id) {
  return taskManager_p_->CancelAsyncTask(async_task_id);
}

bool AsyncManager::CancelAsyncTasksFromUser(
    test_vendor_lib::AsyncUserId user_id) {
  return taskManager_p_->CancelAsyncTasksFromUser(user_id);
}

void AsyncManager::Synchronize(const CriticalCallback& critical) {
  std::unique_lock<std::mutex> guard(synchronization_mutex_);
  critical();
}
}  // namespace test_vendor_lib