• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6 
7 #include <queue>
8 #include <set>
9 #include <utility>
10 
11 #include <GLES2/gl2.h>
12 #ifndef GL_GLEXT_PROTOTYPES
13 #define GL_GLEXT_PROTOTYPES 1
14 #endif
15 #include <GLES2/gl2ext.h>
16 #include <GLES2/gl2extchromium.h>
17 
18 #include "base/bind.h"
19 #include "base/bind_helpers.h"
20 #include "base/lazy_instance.h"
21 #include "base/logging.h"
22 #include "base/memory/weak_ptr.h"
23 #include "base/message_loop/message_loop_proxy.h"
24 #include "base/sequence_checker.h"
25 #include "base/synchronization/condition_variable.h"
26 #include "base/threading/thread.h"
27 #include "gpu/command_buffer/service/command_buffer_service.h"
28 #include "gpu/command_buffer/service/context_group.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gpu_control_service.h"
31 #include "gpu/command_buffer/service/gpu_scheduler.h"
32 #include "gpu/command_buffer/service/image_manager.h"
33 #include "gpu/command_buffer/service/mailbox_manager.h"
34 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
35 #include "ui/gfx/size.h"
36 #include "ui/gl/gl_context.h"
37 #include "ui/gl/gl_image.h"
38 #include "ui/gl/gl_share_group.h"
39 
40 #if defined(OS_ANDROID)
41 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
42 #include "ui/gl/android/surface_texture.h"
43 #endif
44 
45 namespace gpu {
46 
47 namespace {
48 
// Process-wide factory used to create GpuMemoryBuffers; installed via
// SetGpuMemoryBufferFactory() and read by GpuControlService.  May stay NULL.
static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
50 
// Runs |task| synchronously, stores its return value into |*result|, then
// signals |completion| so a thread blocked on the event can pick it up.
// |result| and |completion| must outlive this call.
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
58 
// Service implementation that owns a dedicated in-process GPU thread.  It is
// reference counted so a single instance can be shared by every
// InProcessCommandBuffer client that did not supply its own Service.
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  // Forward Service ref-counting to RefCountedThreadSafe so the thread is
  // torn down when the last client releases its reference.
  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  // Posts |task| to this thread's message loop.
  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
  // Posts |callback| with a short delay; used to pump idle work.
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }

 private:
  virtual ~GpuInProcessThread();
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};
83 
// The thread starts immediately on construction and is joined on destruction.
GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  // Small delay so idle work (e.g. query polling) does not starve regular
  // tasks posted via ScheduleTask().
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}
100 
// Clients constructed without an explicit Service; they share the default
// GpuInProcessThread.  Guarded by default_thread_clients_lock_.
base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
    LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> default_thread_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
105 
106 class ScopedEvent {
107  public:
ScopedEvent(base::WaitableEvent * event)108   ScopedEvent(base::WaitableEvent* event) : event_(event) {}
~ScopedEvent()109   ~ScopedEvent() { event_->Signal(); }
110 
111  private:
112   base::WaitableEvent* event_;
113 };
114 
// Tracks sync points that have been generated but not yet retired, and lets
// callers block until a given sync point passes.  All methods are safe to
// call from any thread.
class SyncPointManager {
 public:
  SyncPointManager();
  ~SyncPointManager();

  // Allocates the next sync point value and marks it pending.
  uint32 GenerateSyncPoint();
  // Marks |sync_point| as passed and wakes all waiting threads.
  void RetireSyncPoint(uint32 sync_point);

  // Returns true once |sync_point| has been retired.
  bool IsSyncPointPassed(uint32 sync_point);
  // Blocks the calling thread until |sync_point| is retired.
  void WaitSyncPoint(uint32 sync_point);

private:
  // This lock protects access to pending_sync_points_ and next_sync_point_ and
  // is used with the ConditionVariable to signal when a sync point is retired.
  base::Lock lock_;
  std::set<uint32> pending_sync_points_;
  uint32 next_sync_point_;
  base::ConditionVariable cond_var_;
};
134 
SyncPointManager::SyncPointManager() : next_sync_point_(0), cond_var_(&lock_) {}

SyncPointManager::~SyncPointManager() {
  // Every generated sync point must have been retired before teardown.
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}

uint32 SyncPointManager::GenerateSyncPoint() {
  base::AutoLock lock(lock_);
  uint32 sync_point = next_sync_point_++;
  // NOTE(review): the first value handed out is 0 and the counter can wrap
  // around; the DCHECK below only catches a collision with a sync point that
  // is still pending.  Confirm callers never treat 0 as "no sync point".
  DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
  pending_sync_points_.insert(sync_point);
  return sync_point;
}

void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  DCHECK(pending_sync_points_.count(sync_point));
  pending_sync_points_.erase(sync_point);
  // Wake every waiter; each one re-checks its own sync point in its loop.
  cond_var_.Broadcast();
}

bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
  base::AutoLock lock(lock_);
  return pending_sync_points_.count(sync_point) == 0;
}

void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  // Wait() releases lock_ while blocked, so RetireSyncPoint can make
  // progress; loop guards against spurious wakeups.
  while (pending_sync_points_.count(sync_point)) {
    cond_var_.Wait();
  }
}
167 
// Process-wide sync point bookkeeping shared by all in-process contexts.
base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;

// Adapter matching the decoder's wait-sync-point callback signature; blocks
// until the sync point retires and reports success.
bool WaitSyncPoint(uint32 sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  return true;
}
175 
}  // anonymous namespace
177 
// Out-of-line definitions anchor Service's ctor/dtor in this translation
// unit; the interface itself is declared in the header.
InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}
181 
182 scoped_refptr<InProcessCommandBuffer::Service>
GetDefaultService()183 InProcessCommandBuffer::GetDefaultService() {
184   base::AutoLock lock(default_thread_clients_lock_.Get());
185   scoped_refptr<Service> service;
186   if (!default_thread_clients_.Get().empty()) {
187     InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
188     service = other->service_;
189     DCHECK(service.get());
190   } else {
191     service = new GpuInProcessThread;
192   }
193   return service;
194 }
195 
InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      last_put_offset_(-1),
      flush_event_(false, false),
      // Fall back to the shared default service when none is supplied.
      service_(service.get() ? service : GetDefaultService()),
      gpu_thread_weak_ptr_factory_(this) {
  // Track clients on the default service so GetDefaultService() can hand the
  // same thread to later clients.
  if (!service) {
    base::AutoLock lock(default_thread_clients_lock_.Get());
    default_thread_clients_.Get().insert(this);
  }
}
208 
InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
  // erase() is a harmless no-op for clients created with their own service.
  base::AutoLock lock(default_thread_clients_lock_.Get());
  default_thread_clients_.Get().erase(this);
}
214 
// Decoder resize callback for onscreen surfaces; runs on the GPU thread.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  // |scale_factor| is not forwarded here; only the pixel size is applied.
  surface_->Resize(size);
}
220 
MakeCurrent()221 bool InProcessCommandBuffer::MakeCurrent() {
222   CheckSequencedThread();
223   command_buffer_lock_.AssertAcquired();
224 
225   if (!context_lost_ && decoder_->MakeCurrent())
226     return true;
227   DLOG(ERROR) << "Context lost because MakeCurrent failed.";
228   command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
229   command_buffer_->SetParseError(gpu::error::kLostContext);
230   return false;
231 }
232 
// Put-offset-changed callback: executes newly submitted commands on the GPU
// thread.  Must be called with |command_buffer_lock_| held.
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  // MakeCurrent() already records the lost-context error on failure.
  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}
242 
// Switches the command buffer's active get buffer.  Must be called with
// |command_buffer_lock_| held; always reports success.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
249 
// Client-thread entry point: runs InitializeOnGpuThread() on the service
// thread and blocks until it finishes.  Returns false if GPU-side
// initialization failed.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group) {
  // Contexts can only share resources when they live on the same service.
  DCHECK(!share_group || service_ == share_group->service_);
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  // |capabilities| is filled in by the GPU thread via the pointer in
  // |params|; it stays valid because we block on |completion| below.
  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  if (result)
    capabilities_ = capabilities;
  return result;
}
293 
// GPU-thread half of Initialize(): builds the command buffer service,
// decoder, scheduler, GL surface and context, in that order.  Any failure
// tears everything down via DestroyOnGpuThread() and returns false.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  // Callbacks are bound through the GPU-thread weak pointer so they become
  // no-ops once DestroyOnGpuThread() invalidates it.
  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // Join the share group's GL state when sharing with another context.
  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

  StreamTextureManager* stream_texture_manager = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager = stream_texture_manager_ =
      params.context_group ? params.context_group->stream_texture_manager_.get()
                    : new StreamTextureManagerInProcess;
#endif

  bool bind_generates_resource = false;
  // Reuse the share context's ContextGroup, or create a fresh one.
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group ? params.context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(NULL,
                                              NULL,
                                              NULL,
                                              stream_texture_manager,
                                              NULL,
                                              bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  // A client-supplied surface may already be set (see Initialize());
  // otherwise create one here on the GPU thread.
  if (!surface_) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts()) {
    // All virtual contexts in the share group wrap one shared real context,
    // created lazily on first use.
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  // The GPU memory manager is not wired up for in-process command buffers.
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  gpu_control_.reset(
      new GpuControlService(decoder_->GetContextGroup()->image_manager(),
                            g_gpu_memory_buffer_factory,
                            decoder_->GetContextGroup()->mailbox_manager(),
                            decoder_->GetQueryManager(),
                            decoder_->GetCapabilities()));

  // Report capabilities back to the client thread through |params|.
  *params.capabilities = gpu_control_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));

  return true;
}
422 
// Tears the buffer down on the GPU thread and blocks until that completes.
// Also called from the destructor; DestroyOnGpuThread() tolerates repeats.
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}
434 
// GPU-thread teardown.  Always reports success so RunTaskWithResult has a
// value to hand back.
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  // Invalidate first so any still-queued GPU-thread callbacks bound through
  // the weak pointer become no-ops during teardown.
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;

  return true;
}
451 
// DCHECKs the calling sequence.  |sequence_checker_| only exists when the
// client supplied its own GLSurface (see Initialize()); otherwise any
// sequence is acceptable.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
456 
// Parse-error callback: notifies the client (at most once) and latches the
// lost-context flag.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Reset so repeated losses do not re-notify the client.
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}
466 
// Refreshes |last_state_| from the snapshot taken at the last flush.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  // Only adopt the snapshot if it is newer; the unsigned subtraction keeps
  // the generation comparison correct across wrap-around.
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
}

// Returns the cached state without consulting the flush snapshot.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  // Refresh the cache so the returned token is current.
  GetStateFast();
  return last_state_.token;
}
490 
// GPU-thread half of Flush().  The ScopedEvent guarantees |flush_event_| is
// signaled on every exit path so FlushSync() waiters always wake up.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
  // The snapshot's error state and |context_lost_| must agree.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}
513 
// Performs one slice of idle work and reschedules itself while the scheduler
// still reports pending work.
void InProcessCommandBuffer::ScheduleMoreIdleWork() {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  if (gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}
524 
// Client-thread entry point: queues execution up to |put_offset| on the GPU
// thread.  No-op when already in an error state or when nothing new was
// submitted since the last flush.
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}
539 
// Flushes and then blocks until the GPU thread advances past
// |last_known_get| or enters an error state.
CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  CheckSequencedThread();
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    // FlushOnGpuThread() signals this event after each flush completes.
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}
556 
// Switches the command buffer to the shared-memory buffer |shm_id| and
// resets the put offset and cached state snapshot accordingly.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
}
572 
// Allocates a transfer buffer of |size| bytes; the assigned id is returned
// through |id|.  Synchronous: guarded by |command_buffer_lock_|.
gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}
579 
// Queues asynchronous destruction of transfer buffer |id| on the GPU thread.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}
588 
// Not supported for the in-process implementation; must never be called.
gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}

// Returns the capabilities captured during Initialize().
gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
597 
// Synchronously creates a GpuMemoryBuffer through the GpuControl service;
// the assigned id is returned through |id|.
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return gpu_control_->CreateGpuMemoryBuffer(width,
                                             height,
                                             internalformat,
                                             id);
}
610 
// Queues asynchronous destruction of GpuMemoryBuffer |id| on the GPU thread.
void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&GpuControl::DestroyGpuMemoryBuffer,
                                  base::Unretained(gpu_control_.get()),
                                  id);

  QueueTask(task);
}
619 
// Delegates mailbox-name generation to the GpuControl service, serialized by
// |command_buffer_lock_|.
bool InProcessCommandBuffer::GenerateMailboxNames(
    unsigned num, std::vector<gpu::Mailbox>* names) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return gpu_control_->GenerateMailboxNames(num, names);
}
626 
// Generates a sync point immediately on the calling thread, then queues its
// retirement on the GPU thread so it only retires after previously queued
// work has run.
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}
634 
// Pushes pending texture updates to other contexts, then retires the sync
// point so waiters unblock.
void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  // NOTE(review): MakeCurrent() asserts |command_buffer_lock_| is held, but
  // this path does not acquire it — verify against DCHECK-enabled builds.
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PushTextureUpdates();
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}
642 
// Arranges for |callback| to run on this thread (via WrapCallback) once
// |sync_point| has been retired.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}
651 
// Runs |callback| if the sync point already passed; otherwise re-posts
// itself as idle work, polling instead of blocking the GPU thread.
void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}
665 
// Arranges for |callback| to run on this thread once |query| completes,
// delegating the completion tracking to the GpuControl service.
void InProcessCommandBuffer::SignalQuery(unsigned query,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&GpuControl::SignalQuery,
                       base::Unretained(gpu_control_.get()),
                       query,
                       WrapCallback(callback)));
}
674 
// Visibility and memory-stat hooks are intentionally no-ops in-process.
void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}

void InProcessCommandBuffer::SendManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
}

// Runs |callback| on this thread after all previously queued GPU-thread
// tasks have executed.
void InProcessCommandBuffer::Echo(const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}
689 
// The service-side CommandBuffer interface below is unused in the in-process
// configuration; none of these may ever be called.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }

void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }

void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}
707 
708 namespace {
709 
PostCallback(const scoped_refptr<base::MessageLoopProxy> & loop,const base::Closure & callback)710 void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
711                          const base::Closure& callback) {
712   if (!loop->BelongsToCurrentThread()) {
713     loop->PostTask(FROM_HERE, callback);
714   } else {
715     callback.Run();
716   }
717 }
718 
// Runs |callback|; because ownership travels with the task, the closure is
// also destroyed on the thread this executes on.
void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}
723 
724 }  // anonymous namespace
725 
// Wraps |callback| so that, regardless of which thread invokes the wrapper,
// the callback runs — and is destroyed — on the message loop that is current
// at wrap time.
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
738 
739 #if defined(OS_ANDROID)
// Android only: returns the SurfaceTexture registered under |stream_id|.
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
745 #endif
746 
// static
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    GpuMemoryBufferFactory* factory) {
  // NOTE(review): unsynchronized write to a global — presumably set once
  // during startup before any buffers are created; confirm with callers.
  g_gpu_memory_buffer_factory = factory;
}
752 
753 }  // namespace gpu
754