// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "mojo/core/watcher_dispatcher.h"

#include <algorithm>
#include <limits>

#include "base/debug/alias.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "mojo/core/watch.h"

namespace mojo {
namespace core {

WatcherDispatcher::WatcherDispatcher(MojoTrapEventHandler handler)
    : handler_(handler) {}

void WatcherDispatcher::NotifyHandleState(Dispatcher* dispatcher,
                                          const HandleSignalsState& state) {
  base::AutoLock lock(lock_);
  auto it = watched_handles_.find(dispatcher);
  if (it == watched_handles_.end())
    return;

  // Maybe fire a notification to the watch associated with this dispatcher,
  // provided we're armed and it cares about the new state.
  if (it->second->NotifyState(state, armed_)) {
    ready_watches_.insert(it->second.get());

    // If we were armed and got here, we notified the watch. Disarm.
    armed_ = false;
  } else {
    ready_watches_.erase(it->second.get());
  }
}

void WatcherDispatcher::NotifyHandleClosed(Dispatcher* dispatcher) {
  scoped_refptr<Watch> watch;
  {
    base::AutoLock lock(lock_);
    auto it = watched_handles_.find(dispatcher);
    if (it == watched_handles_.end())
      return;

    watch = std::move(it->second);

    // Wipe out all state associated with the closed dispatcher.
    watches_.erase(watch->context());
    ready_watches_.erase(watch.get());
    watched_handles_.erase(it);
  }

  // NOTE: It's important that this is called outside of |lock_| since it
  // acquires internal Watch locks.
  watch->Cancel();
}

void WatcherDispatcher::InvokeWatchCallback(uintptr_t context,
                                            MojoResult result,
                                            const HandleSignalsState& state,
                                            MojoTrapEventFlags flags) {
  MojoTrapEvent event;
  event.struct_size = sizeof(event);
  event.trigger_context = context;
  event.result = result;
  event.signals_state = static_cast<MojoHandleSignalsState>(state);
  event.flags = flags;

  {
    // We avoid holding the lock during dispatch. It's OK for notification
    // callbacks to close this watcher, and it's OK for notifications to race
    // with closure, if for example the watcher is closed from another thread
    // between this test and the invocation of |handler_| below.
    //
    // Because cancellation synchronously blocks all future notifications, and
    // because notifications themselves are mutually exclusive for any given
    // context, we still guarantee that a single MOJO_RESULT_CANCELLED result
    // is the last notification received for any given context.
    //
    // This guarantee is sufficient to make safe, synchronized, per-context
    // state management possible in user code.
    base::AutoLock lock(lock_);
    if (closed_ && result != MOJO_RESULT_CANCELLED)
      return;
  }

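  // Dispatch the event without |lock_| held so the handler may safely
  // re-enter this dispatcher, e.g. to close it (see comment above).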
  handler_(&event);
}

Dispatcher::Type WatcherDispatcher::GetType() const {
  return Type::WATCHER;
}

MojoResult WatcherDispatcher::Close() {
  // We swap out all the watched handle information onto the stack so we can
  // call into their dispatchers without our own lock held.
  base::flat_map<uintptr_t, scoped_refptr<Watch>> watches;
  {
    base::AutoLock lock(lock_);
    if (closed_)
      return MOJO_RESULT_INVALID_ARGUMENT;
    closed_ = true;
    std::swap(watches, watches_);
    watched_handles_.clear();
  }

  // Remove all refs from our watched dispatchers and fire cancellations.
  for (auto& entry : watches) {
    entry.second->dispatcher()->RemoveWatcherRef(this, entry.first);
    entry.second->Cancel();
  }

  return MOJO_RESULT_OK;
}

MojoResult WatcherDispatcher::WatchDispatcher(
    scoped_refptr<Dispatcher> dispatcher,
    MojoHandleSignals signals,
    MojoTriggerCondition condition,
    uintptr_t context) {
  // NOTE: Because it's critical to avoid acquiring any other dispatcher locks
  // while |lock_| is held, we defer adding ourselves to the dispatcher until
  // after we've updated all our own relevant state and released |lock_|.
  {
    base::AutoLock lock(lock_);
    if (closed_)
      return MOJO_RESULT_INVALID_ARGUMENT;

    if (watches_.count(context) || watched_handles_.count(dispatcher.get()))
      return MOJO_RESULT_ALREADY_EXISTS;

    scoped_refptr<Watch> watch =
        new Watch(this, dispatcher, context, signals, condition);
    watches_.insert({context, watch});
    auto result =
        watched_handles_.insert(std::make_pair(dispatcher.get(), watch));
    DCHECK(result.second);
  }

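  // Register with the watched dispatcher now that our own bookkeeping is in
  // place and |lock_| has been released (see NOTE above).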
  MojoResult rv = dispatcher->AddWatcherRef(this, context);
  if (rv != MOJO_RESULT_OK) {
    // Oops. This was not a valid handle to watch. Undo the above work and
    // fail gracefully.
    base::AutoLock lock(lock_);
    watches_.erase(context);
    watched_handles_.erase(dispatcher.get());
    return rv;
  }

  bool remove_now;
  {
    // If we've been closed already, there's a chance our closure raced with
    // the call to AddWatcherRef() above. In that case we want to ensure we've
    // removed our ref from |dispatcher|. Note that this may in turn race
    // with normal removal, but that's fine.
    base::AutoLock lock(lock_);
    remove_now = closed_;
  }
  if (remove_now)
    dispatcher->RemoveWatcherRef(this, context);

  return MOJO_RESULT_OK;
}

MojoResult WatcherDispatcher::CancelWatch(uintptr_t context) {
  // We may remove the last stored ref to the Watch below, so we retain
  // a reference on the stack.
  scoped_refptr<Watch> watch;
  {
    base::AutoLock lock(lock_);
    if (closed_)
      return MOJO_RESULT_INVALID_ARGUMENT;
    auto it = watches_.find(context);
    if (it == watches_.end())
      return MOJO_RESULT_NOT_FOUND;
    watch = it->second;
    watches_.erase(it);
  }

  // Mark the watch as cancelled so no further notifications get through.
  watch->Cancel();

  // We remove the watcher ref for this context before updating any more
  // internal watcher state, ensuring that we don't receive further
  // notifications for this context.
  watch->dispatcher()->RemoveWatcherRef(this, context);

  {
    base::AutoLock lock(lock_);
    auto handle_it = watched_handles_.find(watch->dispatcher().get());

    // If another thread races to close this watcher handle,
    // |watched_handles_| may have been cleared by the time we reach this
    // section.
    if (handle_it == watched_handles_.end())
      return MOJO_RESULT_OK;

    ready_watches_.erase(handle_it->second.get());
    watched_handles_.erase(handle_it);
  }

  return MOJO_RESULT_OK;
}

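// Attempts to arm the trap. Arming succeeds only when no watch is currently
// ready to notify; otherwise this returns MOJO_RESULT_FAILED_PRECONDITION
// and, if the caller supplied a |blocking_events| buffer, fills in up to
// |*num_blocking_events| events describing ready watches, chosen in
// round-robin order starting after the last watch reported here.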
MojoResult WatcherDispatcher::Arm(uint32_t* num_blocking_events,
                                  MojoTrapEvent* blocking_events) {
  base::AutoLock lock(lock_);
  if (num_blocking_events && !blocking_events)
    return MOJO_RESULT_INVALID_ARGUMENT;
  if (closed_)
    return MOJO_RESULT_INVALID_ARGUMENT;

  if (watched_handles_.empty())
    return MOJO_RESULT_NOT_FOUND;

  if (ready_watches_.empty()) {
    // Fast path: No watches are ready to notify, so we're done.
    armed_ = true;
    return MOJO_RESULT_OK;
  }

  if (num_blocking_events) {
    DCHECK_LE(ready_watches_.size(), std::numeric_limits<uint32_t>::max());
    *num_blocking_events = std::min(
        *num_blocking_events, static_cast<uint32_t>(ready_watches_.size()));

    WatchSet::const_iterator next_ready_iter = ready_watches_.begin();
    if (last_watch_to_block_arming_) {
      // Find the next watch to notify in simple round-robin order on the
      // |ready_watches_| set, wrapping around to the beginning if necessary.
      next_ready_iter = ready_watches_.find(last_watch_to_block_arming_);
      if (next_ready_iter != ready_watches_.end())
        ++next_ready_iter;
      if (next_ready_iter == ready_watches_.end())
        next_ready_iter = ready_watches_.begin();
    }

    for (size_t i = 0; i < *num_blocking_events; ++i) {
      const Watch* const watch = *next_ready_iter;
      if (blocking_events[i].struct_size < sizeof(*blocking_events))
        return MOJO_RESULT_INVALID_ARGUMENT;
      blocking_events[i].flags = MOJO_TRAP_EVENT_FLAG_WITHIN_API_CALL;
      blocking_events[i].trigger_context = watch->context();
      blocking_events[i].result = watch->last_known_result();
      blocking_events[i].signals_state = watch->last_known_signals_state();

      // Iterate and wrap around.
      last_watch_to_block_arming_ = watch;
      ++next_ready_iter;
      if (next_ready_iter == ready_watches_.end())
        next_ready_iter = ready_watches_.begin();
    }
  }

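  // At least one watch was already ready to notify, so the trap stays
  // disarmed.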
  return MOJO_RESULT_FAILED_PRECONDITION;
}

WatcherDispatcher::~WatcherDispatcher() = default;

}  // namespace core
}  // namespace mojo