// TODO(jam): move this file to src/content once we have an interface that the
// embedder provides. We can then use it to get the resource and resize the
// window.
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/gpu_process_host_ui_shim.h"

#include "base/command_line.h"
#include "base/id_map.h"
#include "base/process_util.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/gpu_data_manager.h"
#include "chrome/browser/io_thread.h"
#include "content/browser/browser_thread.h"
#include "content/browser/gpu_process_host.h"
#include "content/browser/renderer_host/render_process_host.h"
#include "content/browser/renderer_host/render_view_host.h"
#include "content/browser/renderer_host/render_widget_host_view.h"
#include "content/common/content_switches.h"
#include "content/common/gpu_messages.h"
#include "gpu/common/gpu_trace_event.h"

#if defined(OS_LINUX)
// These two #includes need to come after gpu_messages.h.
#include <gdk/gdkwindow.h>  // NOLINT
#include <gdk/gdkx.h>  // NOLINT
#include "ui/base/x/x11_util.h"
#include "ui/gfx/gtk_native_view_id_manager.h"
#include "ui/gfx/size.h"
#endif  // defined(OS_LINUX)

namespace {

// One of the linux specific headers defines this as a macro.
#ifdef DestroyAll
#undef DestroyAll
#endif

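// Registry of live GpuProcessHostUIShim instances, keyed by GPU host id.
// Accessed only on the UI thread.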
IDMap<GpuProcessHostUIShim> g_hosts_by_id;

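// Forwards an outgoing message to the GpuProcessHost with the given id on the
// IO thread. The message is dropped if that host no longer exists.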
class SendOnIOThreadTask : public Task {
 public:
  SendOnIOThreadTask(int host_id, IPC::Message* msg)
      : host_id_(host_id),
        msg_(msg) {
  }

 private:
  void Run() {
    GpuProcessHost* host = GpuProcessHost::FromID(host_id_);
    if (host)
      host->Send(msg_.release());
  }

  int host_id_;
  scoped_ptr<IPC::Message> msg_;
};

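// Sender handed to the in-process GpuChannelManager (the host_id == 0 case
// below). Rather than writing to an IPC channel, it reposts each message to
// the UI thread as a RouteToGpuProcessHostUIShimTask.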
class UIThreadSender : public IPC::Channel::Sender {
 public:
  virtual bool Send(IPC::Message* msg) {
    // The GPU process must never send a synchronous IPC message to the
    // browser process. This could result in deadlock. Unfortunately linux
    // does this for GpuHostMsg_ResizeXID. TODO(apatrick): fix this before
    // issuing any GL calls on the browser process' GPU thread.
#if !defined(OS_LINUX)
    DCHECK(!msg->is_sync());
#endif

    // When the GpuChannelManager sends an IPC, post it to the UI thread
    // without using IPC.
    bool success = BrowserThread::PostTask(
        BrowserThread::UI,
        FROM_HERE,
        new RouteToGpuProcessHostUIShimTask(0, *msg));

    delete msg;
    return success;
  }
};

void ForwardMessageToGpuThread(GpuChannelManager* gpu_channel_manager,
                               IPC::Message* msg) {
  bool success = gpu_channel_manager->OnMessageReceived(*msg);

  // If the message was not handled, it is likely it was intended for the
  // GpuChildThread, which does not exist in single process and in process GPU
  // mode.
  DCHECK(success);

  delete msg;
}

}  // namespace

RouteToGpuProcessHostUIShimTask::RouteToGpuProcessHostUIShimTask(
    int host_id,
    const IPC::Message& msg)
    : host_id_(host_id),
      msg_(msg) {
}

RouteToGpuProcessHostUIShimTask::~RouteToGpuProcessHostUIShimTask() {
}

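// Delivers the copied message to the GpuProcessHostUIShim with the matching
// host id, if it still exists.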
void RouteToGpuProcessHostUIShimTask::Run() {
  GpuProcessHostUIShim* ui_shim = GpuProcessHostUIShim::FromID(host_id_);
  if (ui_shim)
    ui_shim->OnMessageReceived(msg_);
}

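// A host_id of 0 denotes the in-process GPU case: there is no GPU process, so
// the shim owns a GpuChannelManager directly and gives it a UIThreadSender to
// reply through.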
GpuProcessHostUIShim::GpuProcessHostUIShim(int host_id)
    : host_id_(host_id),
      gpu_channel_manager_(NULL),
      ui_thread_sender_(NULL) {
  g_hosts_by_id.AddWithID(this, host_id_);
  if (host_id == 0) {
    ui_thread_sender_ = new UIThreadSender;
    gpu_channel_manager_ = new GpuChannelManager(
        ui_thread_sender_,
        NULL,
        g_browser_process->io_thread()->message_loop(),
        g_browser_process->shutdown_event());
  }
}

// static
GpuProcessHostUIShim* GpuProcessHostUIShim::Create(int host_id) {
  DCHECK(!FromID(host_id));
  return new GpuProcessHostUIShim(host_id);
}

// static
void GpuProcessHostUIShim::Destroy(int host_id) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
  delete FromID(host_id);
}

// static
void GpuProcessHostUIShim::DestroyAll() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
  while (!g_hosts_by_id.IsEmpty()) {
    IDMap<GpuProcessHostUIShim>::iterator it(&g_hosts_by_id);
    delete it.GetCurrentValue();
  }
}

// static
GpuProcessHostUIShim* GpuProcessHostUIShim::FromID(int host_id) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
  return g_hosts_by_id.Lookup(host_id);
}

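// Routes outgoing messages to the in-process GPU thread when host_id_ is 0,
// otherwise to the real GPU process via GpuProcessHost on the IO thread.
// Ownership of |msg| is transferred in both cases.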
bool GpuProcessHostUIShim::Send(IPC::Message* msg) {
  DCHECK(CalledOnValidThread());

  bool success;

  if (host_id_ == 0) {
    success = BrowserThread::PostTask(
        BrowserThread::GPU,
        FROM_HERE,
        NewRunnableFunction(ForwardMessageToGpuThread,
                            gpu_channel_manager_,
                            msg));
  } else {
    success = BrowserThread::PostTask(
        BrowserThread::IO,
        FROM_HERE,
        new SendOnIOThreadTask(host_id_, msg));
  }

  return success;
}

bool GpuProcessHostUIShim::OnMessageReceived(const IPC::Message& message) {
  DCHECK(CalledOnValidThread());

  if (message.routing_id() != MSG_ROUTING_CONTROL)
    return false;

  return OnControlMessageReceived(message);
}

#if defined(OS_MACOSX)

void GpuProcessHostUIShim::DidDestroyAcceleratedSurface(int renderer_id,
                                                        int render_view_id) {
  // Destroy the command buffer that owns the accelerated surface.
  Send(new GpuMsg_DestroyCommandBuffer(renderer_id, render_view_id));
}

void GpuProcessHostUIShim::SendToGpuHost(int host_id, IPC::Message* msg) {
  GpuProcessHostUIShim* ui_shim = FromID(host_id);
  if (!ui_shim)
    return;

  ui_shim->Send(msg);
}

#endif

GpuProcessHostUIShim::~GpuProcessHostUIShim() {
  DCHECK(CalledOnValidThread());
  g_hosts_by_id.Remove(host_id_);

  // Ensure these are destroyed on the GPU thread.
  if (gpu_channel_manager_) {
    BrowserThread::DeleteSoon(BrowserThread::GPU,
                              FROM_HERE,
                              gpu_channel_manager_);
    gpu_channel_manager_ = NULL;
  }
  if (ui_thread_sender_) {
    BrowserThread::DeleteSoon(BrowserThread::GPU,
                              FROM_HERE,
                              ui_thread_sender_);
    ui_thread_sender_ = NULL;
  }
}

bool GpuProcessHostUIShim::OnControlMessageReceived(
    const IPC::Message& message) {
  DCHECK(CalledOnValidThread());

  IPC_BEGIN_MESSAGE_MAP(GpuProcessHostUIShim, message)
    IPC_MESSAGE_HANDLER(GpuHostMsg_OnLogMessage,
                        OnLogMessage)
#if defined(OS_LINUX) && !defined(TOUCH_UI) || defined(OS_WIN)
    IPC_MESSAGE_HANDLER(GpuHostMsg_ResizeView, OnResizeView)
#elif defined(OS_MACOSX)
    IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceSetIOSurface,
                        OnAcceleratedSurfaceSetIOSurface)
    IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceBuffersSwapped,
                        OnAcceleratedSurfaceBuffersSwapped)
#elif defined(OS_WIN)
    IPC_MESSAGE_HANDLER(GpuHostMsg_ScheduleComposite, OnScheduleComposite);
#endif
    IPC_MESSAGE_UNHANDLED_ERROR()
  IPC_END_MESSAGE_MAP()

  return true;
}

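// Packages a log message reported by the GPU process into a DictionaryValue
// and hands it to GpuDataManager.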
void GpuProcessHostUIShim::OnLogMessage(
    int level,
    const std::string& header,
    const std::string& message) {
  DictionaryValue* dict = new DictionaryValue();
  dict->SetInteger("level", level);
  dict->SetString("header", header);
  dict->SetString("message", message);
  GpuDataManager::GetInstance()->AddLogMessage(dict);
}

#if defined(OS_LINUX) && !defined(TOUCH_UI) || defined(OS_WIN)

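// Resizes the native window backing the view's compositing surface and then
// ACKs so the GPU process can resume work on the command buffer.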
void GpuProcessHostUIShim::OnResizeView(int32 renderer_id,
                                        int32 render_view_id,
                                        int32 command_buffer_route_id,
                                        gfx::Size size) {
  RenderViewHost* host = RenderViewHost::FromID(renderer_id, render_view_id);
  if (host) {
    RenderWidgetHostView* view = host->view();
    if (view) {
      gfx::PluginWindowHandle handle = view->GetCompositingSurface();

      // Resize the window synchronously. The GPU process must not issue GL
      // calls on the command buffer until the window is the size it expects it
      // to be.
#if defined(OS_LINUX) && !defined(TOUCH_UI)
      GdkWindow* window = reinterpret_cast<GdkWindow*>(
          gdk_xid_table_lookup(handle));
      if (window) {
        Display* display = GDK_WINDOW_XDISPLAY(window);
        gdk_window_resize(window, size.width(), size.height());
        XSync(display, False);
      }
#elif defined(OS_WIN)
      SetWindowPos(handle,
                   NULL,
                   0, 0,
                   size.width(),
                   size.height(),
                   SWP_NOSENDCHANGING | SWP_NOCOPYBITS | SWP_NOZORDER |
                       SWP_NOACTIVATE | SWP_DEFERERASE);
#endif
    }
  }

  // Always respond even if the window no longer exists. The GPU process cannot
  // make progress on the resizing command buffer until it receives the
  // response.
  Send(new GpuMsg_ResizeViewACK(renderer_id, command_buffer_route_id));
}

#elif defined(OS_MACOSX)

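// Tells the view which IOSurface (and what size) the GPU process will render
// accelerated content into.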
void GpuProcessHostUIShim::OnAcceleratedSurfaceSetIOSurface(
    const GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params& params) {
  RenderViewHost* host = RenderViewHost::FromID(params.renderer_id,
                                                params.render_view_id);
  if (!host)
    return;
  RenderWidgetHostView* view = host->view();
  if (!view)
    return;
  view->AcceleratedSurfaceSetIOSurface(params.window,
                                       params.width,
                                       params.height,
                                       params.identifier);
}

void GpuProcessHostUIShim::OnAcceleratedSurfaceBuffersSwapped(
    const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params) {
  RenderViewHost* host = RenderViewHost::FromID(params.renderer_id,
                                                params.render_view_id);
  if (!host)
    return;
  RenderWidgetHostView* view = host->view();
  if (!view)
    return;
  view->AcceleratedSurfaceBuffersSwapped(
      // Parameters needed to swap the IOSurface.
      params.window,
      params.surface_id,
      // Parameters needed to formulate an acknowledgment.
      params.renderer_id,
      params.route_id,
      host_id_,
      params.swap_buffers_count);
}

#endif

#if defined(OS_WIN)

void GpuProcessHostUIShim::OnScheduleComposite(int renderer_id,
                                               int render_view_id) {
  RenderViewHost* host = RenderViewHost::FromID(renderer_id,
                                                render_view_id);
  if (!host) {
    return;
  }
  host->ScheduleComposite();
}

#endif