// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_pump_glib.h"

#include <fcntl.h>
#include <math.h>

#include <gtk/gtk.h>
#include <glib.h>

#include "base/eintr_wrapper.h"
#include "base/logging.h"
#include "base/threading/platform_thread.h"

namespace {

// We send a byte across a pipe to wake up the event loop.
const char kWorkScheduled = '\0';

// Return a timeout suitable for the glib loop: -1 to block forever,
// 0 to return right away, or a timeout in milliseconds from now.
int GetTimeIntervalMilliseconds(const base::TimeTicks& from) {
  if (from.is_null())
    return -1;

  // Be careful here. TimeDelta has a precision of microseconds, but we want a
  // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
  // 6? It should be 6 to avoid executing delayed work too early.
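  // For example (illustrative only), with roughly 5.5ms remaining,
  //   GetTimeIntervalMilliseconds(base::TimeTicks::Now() +
  //                               base::TimeDelta::FromMicroseconds(5500))
  // rounds the delay up and returns 6 rather than truncating to 5, so the
  // poll sleeps slightly too long instead of waking before the delayed work
  // is due.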
  int delay = static_cast<int>(
      ceil((from - base::TimeTicks::Now()).InMillisecondsF()));

  // If this value is negative, then we need to run delayed work soon.
  return delay < 0 ? 0 : delay;
}

// A brief refresher on GLib:
// GLib sources have four callbacks: Prepare, Check, Dispatch and Finalize.
// On each iteration of the GLib pump, it calls each source's Prepare function.
// This function should return TRUE if it wants GLib to call its Dispatch, and
// FALSE otherwise. It can also set a timeout in this case for the next time
// Prepare should be called again (it may be called sooner).
// After the Prepare calls, GLib does a poll to check for events from the
// system. File descriptors can be attached to the sources. The poll may block
// if none of the Prepare calls returned TRUE. It will block indefinitely, or
// at most for the minimum timeout returned by a source in Prepare.
// After the poll, GLib calls Check for each source that returned FALSE
// from Prepare. The return value of Check has the same meaning as for Prepare,
// making Check a second chance to tell GLib we are ready for Dispatch.
// Finally, GLib calls Dispatch for each source that is ready. If Dispatch
// returns FALSE, GLib will destroy the source. Dispatch calls may be recursive
// (i.e., you can call Run from them), but Prepare and Check cannot.
// Finalize is called when the source is destroyed.
// NOTE: It is common for subsystems to want to process pending events while
// doing intensive work, for example the flash plugin. They usually use the
// following pattern (recommended by the GTK docs):
// while (gtk_events_pending()) {
//   gtk_main_iteration();
// }
//
// gtk_events_pending just calls g_main_context_pending, which does the
// following:
// - Call prepare on all the sources.
// - Do the poll with a timeout of 0 (not blocking).
// - Call check on all the sources.
// - *Does not* call dispatch on the sources.
// - Return true if any of prepare() or check() returned true.
//
// gtk_main_iteration just calls g_main_context_iteration, which does the whole
// thing: it respects the poll timeout (and may block, although it is expected
// not to if gtk_events_pending returned true), and it calls dispatch.
//
// Thus it is important to only return true from prepare or check if we
// actually have events or work to do. We also need to make sure we keep
// internal state consistent so that if prepare/check return true when called
// from gtk_events_pending, they will still return true when called right
// after, from gtk_main_iteration.
//
// For the GLib pump we try to follow the Windows UI pump model:
// - Whenever we receive a wakeup event or the timer for delayed work expires,
//   we run DoWork and/or DoDelayedWork. That part will also run in the other
//   event pumps.
// - We also run DoWork, DoDelayedWork, and possibly DoIdleWork in the main
//   loop, around event handling.
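//
// Roughly (a simplified sketch for orientation only; GLib's gmain.c is the
// authority), one g_main_context_iteration(context, may_block) amounts to:
//
//   gint max_priority, timeout_ms;
//   GPollFD poll_fds[64];  // The real code grows this array as needed.
//   g_main_context_prepare(context, &max_priority);
//   gint n_fds = g_main_context_query(context, max_priority, &timeout_ms,
//                                     poll_fds, 64);
//   g_poll(poll_fds, n_fds, may_block ? timeout_ms : 0);
//   gboolean ready = g_main_context_check(context, max_priority, poll_fds,
//                                         n_fds);
//   g_main_context_dispatch(context);
//
// g_main_context_pending stops before the dispatch step and simply returns
// |ready|.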

struct WorkSource : public GSource {
  // The pump that created (and will destroy) this source.
  base::MessagePumpForUI* pump;
};

gboolean WorkSourcePrepare(GSource* source,
                           gint* timeout_ms) {
  *timeout_ms = static_cast<WorkSource*>(source)->pump->HandlePrepare();
  // We always return FALSE, so that our timeout is honored. If we were
  // to return TRUE, the timeout would be considered to be 0 and the poll
  // would never block. Once the poll is finished, Check will be called.
  return FALSE;
}

gboolean WorkSourceCheck(GSource* source) {
  // Only return TRUE if Dispatch should be called.
  return static_cast<WorkSource*>(source)->pump->HandleCheck();
}

gboolean WorkSourceDispatch(GSource* source,
                            GSourceFunc unused_func,
                            gpointer unused_data) {
  static_cast<WorkSource*>(source)->pump->HandleDispatch();
  // Always return TRUE so our source stays registered.
  return TRUE;
}
// I wish these could be const, but g_source_new wants non-const.
GSourceFuncs WorkSourceFuncs = {
  WorkSourcePrepare,
  WorkSourceCheck,
  WorkSourceDispatch,
  NULL  // We do not need a Finalize callback.
};

}  // namespace


namespace base {

struct MessagePumpForUI::RunState {
  Delegate* delegate;
  Dispatcher* dispatcher;

  // Used to flag that the current Run() invocation should return ASAP.
  bool should_quit;

  // Used to count how many Run() invocations are on the stack.
  int run_depth;

  // Records whether the pump has been signaled that there is new work to do.
  // Since we consume the byte on the wakeup pipe as soon as we see it, we
  // keep that state here to stay consistent.
  bool has_work;
};

MessagePumpForUI::MessagePumpForUI()
    : state_(NULL),
      context_(g_main_context_default()),
      wakeup_gpollfd_(new GPollFD) {
  // Create our wakeup pipe, which is used to flag when work was scheduled.
  int fds[2];
  CHECK_EQ(pipe(fds), 0);
  wakeup_pipe_read_  = fds[0];
  wakeup_pipe_write_ = fds[1];
  wakeup_gpollfd_->fd = wakeup_pipe_read_;
  wakeup_gpollfd_->events = G_IO_IN;

  work_source_ = g_source_new(&WorkSourceFuncs, sizeof(WorkSource));
  static_cast<WorkSource*>(work_source_)->pump = this;
  g_source_add_poll(work_source_, wakeup_gpollfd_.get());
  // Use a low priority so that we let other events in the queue go first.
  g_source_set_priority(work_source_, G_PRIORITY_DEFAULT_IDLE);
  // This is needed to allow Run calls inside Dispatch.
  g_source_set_can_recurse(work_source_, TRUE);
  g_source_attach(work_source_, context_);
  gdk_event_handler_set(&EventDispatcher, this, NULL);
}

MessagePumpForUI::~MessagePumpForUI() {
  // Restore the default event handler (gtk_main_do_event), which GTK+ installs
  // during initialization; it ignores the data argument we pass here.
  gdk_event_handler_set(reinterpret_cast<GdkEventFunc>(gtk_main_do_event),
                        this, NULL);
  g_source_destroy(work_source_);
  g_source_unref(work_source_);
  close(wakeup_pipe_read_);
  close(wakeup_pipe_write_);
}

void MessagePumpForUI::RunWithDispatcher(Delegate* delegate,
                                         Dispatcher* dispatcher) {
#ifndef NDEBUG
  // Make sure we only run this on one thread. GTK only has one message pump
  // so we can only have one UI loop per process.
  static base::PlatformThreadId thread_id = base::PlatformThread::CurrentId();
  DCHECK(thread_id == base::PlatformThread::CurrentId()) <<
      "Running MessagePumpForUI on two different threads; "
      "this is unsupported by GLib!";
#endif

  RunState state;
  state.delegate = delegate;
  state.dispatcher = dispatcher;
  state.should_quit = false;
  state.run_depth = state_ ? state_->run_depth + 1 : 1;
  state.has_work = false;

  RunState* previous_state = state_;
  state_ = &state;

  // We really only do a single task for each iteration of the loop. If we
  // have done something, assume there is likely something more to do. This
  // means that we don't block on the message pump until there is nothing
  // more to do. We also start with this set to true so that we don't block
  // on the first iteration of the loop, which lets RunAllPending() work
  // correctly.
  bool more_work_is_plausible = true;

  // We run our own loop instead of using g_main_loop_quit in one of the
  // callbacks. This is so we only quit our own loops, and we don't quit
  // nested loops run by others. TODO(deanm): Is this what we want?
  for (;;) {
    // Don't block if we think we have more work to do.
    bool block = !more_work_is_plausible;

    more_work_is_plausible = RunOnce(context_, block);
    if (state_->should_quit)
      break;

    more_work_is_plausible |= state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;
  }

  state_ = previous_state;
}

bool MessagePumpForUI::RunOnce(GMainContext* context, bool block) {
  // g_main_context_iteration returns true if events have been dispatched.
  return g_main_context_iteration(context, block);
}

// Return the timeout we want passed to poll.
int MessagePumpForUI::HandlePrepare() {
  // We know we have work, but we haven't called HandleDispatch yet. Don't let
  // the pump block so that we can do some processing.
  if (state_ &&  // state_ may be null during tests.
      state_->has_work)
    return 0;

  // We don't think we have work to do, but make sure not to block
  // longer than the next time we need to run delayed work.
  return GetTimeIntervalMilliseconds(delayed_work_time_);
}

bool MessagePumpForUI::HandleCheck() {
  if (!state_)  // state_ may be null during tests.
    return false;

  // We should only ever have a single message on the wakeup pipe, since we
  // are only signaled when the queue went from empty to non-empty. The glib
  // poll will tell us whether there was data, so this read shouldn't block.
  if (wakeup_gpollfd_->revents & G_IO_IN) {
    char msg;
    if (HANDLE_EINTR(read(wakeup_pipe_read_, &msg, 1)) != 1 || msg != '!') {
      NOTREACHED() << "Error reading from the wakeup pipe.";
    }
    // Since we ate the message, we need to record that we have more work,
    // because HandleCheck() may be called without HandleDispatch being called
    // afterwards.
    state_->has_work = true;
  }

  if (state_->has_work)
    return true;

  if (GetTimeIntervalMilliseconds(delayed_work_time_) == 0) {
    // The timer has expired. That condition will stay true until we process
    // that delayed work, so we don't need to record this differently.
    return true;
  }

  return false;
}

void MessagePumpForUI::HandleDispatch() {
  state_->has_work = false;
  if (state_->delegate->DoWork()) {
    // NOTE: on Windows at this point we would call ScheduleWork (see
    // MessagePumpForUI::HandleWorkMessage in message_pump_win.cc). But here,
    // instead of posting a message on the wakeup pipe, we can avoid the
    // syscalls and just signal that we have more work.
    state_->has_work = true;
  }

  if (state_->should_quit)
    return;

  state_->delegate->DoDelayedWork(&delayed_work_time_);
}

void MessagePumpForUI::AddObserver(Observer* observer) {
  observers_.AddObserver(observer);
}

void MessagePumpForUI::RemoveObserver(Observer* observer) {
  observers_.RemoveObserver(observer);
}

void MessagePumpForUI::DispatchEvents(GdkEvent* event) {
  WillProcessEvent(event);
  if (state_ && state_->dispatcher) {  // state_ may be null during tests.
    if (!state_->dispatcher->Dispatch(event))
      state_->should_quit = true;
  } else {
    gtk_main_do_event(event);
  }
  DidProcessEvent(event);
}

void MessagePumpForUI::Run(Delegate* delegate) {
  RunWithDispatcher(delegate, NULL);
}

void MessagePumpForUI::Quit() {
  if (state_) {
    state_->should_quit = true;
  } else {
    NOTREACHED() << "Quit called outside Run!";
  }
}

void MessagePumpForUI::ScheduleWork() {
  // This can be called on any thread, so we don't want to touch any state
  // variables as we would then need locks all over. Writing a byte to the
  // wakeup pipe ensures that if we are sleeping in a poll we will wake up.
  char msg = '!';
  if (HANDLE_EINTR(write(wakeup_pipe_write_, &msg, 1)) != 1) {
    NOTREACHED() << "Could not write to the UI message loop wakeup pipe!";
  }
}
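
// Both ScheduleWork() above and ScheduleDelayedWork() below wake the pump the
// same way: the byte written to wakeup_pipe_write_ makes the glib poll report
// G_IO_IN on wakeup_gpollfd_, WorkSourceCheck() then calls HandleCheck(),
// which consumes the byte and sets has_work, and WorkSourceDispatch() ends up
// calling HandleDispatch() on the pump's thread.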

void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  // We need to wake up the loop in case the poll timeout needs to be
  // adjusted. This will cause us to try to do work, but that's ok.
  delayed_work_time_ = delayed_work_time;
  ScheduleWork();
}

MessagePumpForUI::Dispatcher* MessagePumpForUI::GetDispatcher() {
  return state_ ? state_->dispatcher : NULL;
}

void MessagePumpForUI::WillProcessEvent(GdkEvent* event) {
  FOR_EACH_OBSERVER(Observer, observers_, WillProcessEvent(event));
}

void MessagePumpForUI::DidProcessEvent(GdkEvent* event) {
  FOR_EACH_OBSERVER(Observer, observers_, DidProcessEvent(event));
}

// static
void MessagePumpForUI::EventDispatcher(GdkEvent* event, gpointer data) {
  MessagePumpForUI* message_pump = reinterpret_cast<MessagePumpForUI*>(data);
  message_pump->DispatchEvents(event);
}

}  // namespace base