// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;
using base::MessageLoopProxy;

namespace content {

GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}

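// Creates a GpuChannelHost on the main thread and immediately connects it to
// the GPU process over |channel_handle|.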
// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host = new GpuChannelHost(factory, gpu_info);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

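// Returns true if |handle| refers to a GpuMemoryBuffer type that is usable on
// this platform: shared memory everywhere, IOSurfaces on Mac, and surface
// textures on Android.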
// static
bool GpuChannelHost::IsValidGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle handle) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER:
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
#endif
#if defined(OS_ANDROID)
    case gfx::SURFACE_TEXTURE_BUFFER:
#endif
      return true;
    default:
      return false;
  }
}

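// Each ID counter is advanced once in the constructor so that the IDs handed
// out later start at 1; 0 is presumably reserved as an invalid value.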
GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
                               const gpu::GPUInfo& gpu_info)
    : factory_(factory),
      gpu_info_(gpu_info) {
  next_transfer_buffer_id_.GetNext();
  next_gpu_memory_buffer_id_.GetNext();
  next_route_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_ = IPC::SyncChannel::Create(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      shutdown_event);

  sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}

bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown even though we
  // are actually calling from the main thread (in that case the message is
  // discarded).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  //       without a main listener?
  if (factory_->IsMainThread()) {
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  } else if (base::MessageLoop::current()) {
    bool result = sync_filter_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
    return result;
  }

  return false;
}

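// Creates a command buffer bound to the onscreen surface identified by
// |surface_id|. The request goes through the factory (i.e. the browser); on
// failure the channel may be marked as lost.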
CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
      surface_id, init_params, route_id);
  if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
      // The GPU channel needs to be considered lost. The caller will
      // then set up a new connection, and the GPU channel and any
      // view command buffers will all be associated with the same GPU
      // process.
      DCHECK(MessageLoopProxy::current().get());

      scoped_refptr<base::MessageLoopProxy> io_loop =
          factory_->GetIOLoopProxy();
      io_loop->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                     channel_filter_.get()));
    }

    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

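// Creates a command buffer that renders to an offscreen backing of the given
// size, via a synchronous GpuChannelMsg_CreateOffscreenCommandBuffer sent to
// the GPU process.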
CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
                                                           init_params,
                                                           route_id,
                                                           &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

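// Creates a hardware video decoder associated with the command buffer
// identified by |command_buffer_route_id|.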
scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoDecoder();
}

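// Creates a hardware video encoder associated with the command buffer
// identified by |command_buffer_route_id|.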
scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoEncoder();
}

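// Asks the GPU process to destroy the command buffer, removes its route and
// proxy entry, and deletes the proxy.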
void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  delete command_buffer;
}

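// Routes are registered with the MessageFilter on the IO thread so that
// incoming messages for |route_id| can be dispatched to |listener| on the
// message loop it was added from.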
void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  DCHECK(MessageLoopProxy::current().get());

  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                               channel_filter_.get(), route_id, listener,
                               MessageLoopProxy::current()));
}

void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                               channel_filter_.get(), route_id));
}

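// Duplicates |source_handle| so the GPU process can use it: on Windows this
// requires brokered handle duplication, elsewhere a dup() of the file
// descriptor suffices. Returns a null handle if the channel is lost or
// duplication fails.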
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->GetPeerPID(),
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}

int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}

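// Prepares a GpuMemoryBufferHandle for the GPU process. Shared memory handles
// must be duplicated; IOSurface and surface texture handles can be passed
// through as-is.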
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    gfx::GpuMemoryBufferHandle source_handle) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      return handle;
    }
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
      return source_handle;
#endif
#if defined(OS_ANDROID)
    case gfx::SURFACE_TEXTURE_BUFFER:
      return source_handle;
#endif
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

int32 GpuChannelHost::ReserveGpuMemoryBufferId() {
  return next_gpu_memory_buffer_id_.GetNext();
}

int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}

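// MessageFilter lives on the IO thread. It keeps the route_id -> listener map
// and forwards each incoming message to the message loop the listener was
// registered on.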
GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}

bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  ListenerMap::iterator it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const GpuListenerInfo& info = it->second;
  info.loop->PostTask(
      FROM_HERE,
      base::Bind(
          base::IgnoreResult(&IPC::Listener::OnMessageReceived),
          info.listener,
          message));
  return true;
}

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // re-use this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content