// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ui/gfx/size.h"

namespace content {

CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
}

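// Dispatches IPC messages routed to this command buffer from the GPU
// process. Any message without a matching handler below trips the DCHECK.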
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

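// Allocates and maps the shared memory used to mirror command buffer state
// from the GPU process, shares the handle with that process, and sends the
// Initialize IPC.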
bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result;
  if (!Send(new GpuCommandBufferMsg_Initialize(
      route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.map_image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

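// Sends the new put offset to the GPU process asynchronously. Flushes that
// repeat the last put offset are ignored.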
void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (last_state_.error != gpu::error::kNoError)
    return;
  Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
}

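// Blocks until the last read token falls inside [start, end] or the context
// is lost, folding the state returned by the GPU process into last_state_.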
void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

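// Allocates a shared memory buffer of |size| bytes, registers it with the
// GPU process under a newly reserved id, and returns a gpu::Buffer wrapping
// the local mapping. On failure, *id is set to -1 and NULL is returned.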
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return NULL;

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return NULL;

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return NULL;

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

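// Allocates a GpuMemoryBuffer through the channel factory, registers it with
// the GPU process, and caches it client side keyed by the reserved id. On
// failure, *id is set to -1 and NULL is returned.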
gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveGpuMemoryBufferId();
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());

  scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer(
      channel_->factory()->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage));
  if (!gpu_memory_buffer)
    return NULL;

  DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(
             gpu_memory_buffer->GetHandle()));

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterGpuMemoryBuffer IPC below.
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(
          gpu_memory_buffer->GetHandle());

  if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
                route_id_,
                new_id,
                handle,
                width,
                height,
                internalformat))) {
    return NULL;
  }

  *id = new_id;
  gpu_memory_buffers_[new_id] = gpu_memory_buffer.release();
  return gpu_memory_buffers_[new_id];
}

void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the gpu memory buffer from the client side cache.
  GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
  if (it != gpu_memory_buffers_.end()) {
    delete it->second;
    gpu_memory_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyGpuMemoryBuffer(route_id_, id));
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

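// Queues |callback| to run when the GPU process echoes the message back
// (see OnEchoAck), providing a round-trip completion signal.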
void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return;
  }

  if (!Send(new GpuCommandBufferMsg_Echo(
           route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
    return;
  }

  echo_tasks_.push(callback);
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, &sync_point));
  return sync_point;
}

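// Asks the GPU process to ack (via OnSignalSyncPointAck) once |sync_point|
// has been reached; |callback| runs when that ack arrives.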
void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s) and even if
  // they could do that, all they would do is to prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

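// Sends |msg| over the channel if one is still attached. A failed send marks
// the context lost; if the channel is already gone, the message is deleted
// here, matching IPC::Sender ownership semantics.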
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

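// Merges a state snapshot returned by the GPU process into last_state_,
// ignoring snapshots from older generations.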
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

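// Refreshes last_state_ from the shared-state memory, but only while no
// error has been recorded locally.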
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

}  // namespace content