// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include <queue>
#include <set>
#include <utility>

#include <GLES2/gl2.h>
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES 1
#endif
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/sequence_checker.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_factory.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_control_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_share_group.h"

#if defined(OS_ANDROID)
#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
#include "ui/gl/android/surface_texture.h"
#endif

namespace gpu {

namespace {

static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;

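// Runs |task|, stores its return value in |result|, and signals |completion|,
// turning a task posted to another thread into a blocking call.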
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}

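// Default Service implementation: a dedicated "GpuThread" shared by every
// InProcessCommandBuffer that does not supply its own Service. Ref-counted so
// it lives as long as any client still uses it.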
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
  virtual scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
      OVERRIDE;

 private:
  virtual ~GpuInProcessThread();
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}

scoped_refptr<gles2::ShaderTranslatorCache>
GpuInProcessThread::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_;
}

base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
    LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> default_thread_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;

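// Signals the wrapped event when destroyed, so early returns still wake any
// waiter.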
class ScopedEvent {
 public:
  explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;
};

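// In-process stand-in for the cross-process sync point machinery: sync points
// are pending from generation until retirement, and waiters block on a
// condition variable until the point they need has been retired.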
class SyncPointManager {
 public:
  SyncPointManager();
  ~SyncPointManager();

  uint32 GenerateSyncPoint();
  void RetireSyncPoint(uint32 sync_point);

  bool IsSyncPointPassed(uint32 sync_point);
  void WaitSyncPoint(uint32 sync_point);

 private:
  // This lock protects access to pending_sync_points_ and next_sync_point_ and
  // is used with the ConditionVariable to signal when a sync point is retired.
  base::Lock lock_;
  std::set<uint32> pending_sync_points_;
  uint32 next_sync_point_;
  base::ConditionVariable cond_var_;
};

SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}

SyncPointManager::~SyncPointManager() {
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}

uint32 SyncPointManager::GenerateSyncPoint() {
  base::AutoLock lock(lock_);
  uint32 sync_point = next_sync_point_++;
  DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
  pending_sync_points_.insert(sync_point);
  return sync_point;
}

void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  DCHECK(pending_sync_points_.count(sync_point));
  pending_sync_points_.erase(sync_point);
  cond_var_.Broadcast();
}

bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
  base::AutoLock lock(lock_);
  return pending_sync_points_.count(sync_point) == 0;
}

void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  while (pending_sync_points_.count(sync_point)) {
    cond_var_.Wait();
  }
}

base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;

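// Adapter matching the decoder's wait-sync-point callback signature; blocks
// until the sync point is retired, then reports success.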
bool WaitSyncPoint(uint32 sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  return true;
}

}  // anonymous namespace

InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}

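// Shares the service of an existing default-thread client when one exists;
// otherwise spins up a fresh GpuInProcessThread.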
scoped_refptr<InProcessCommandBuffer::Service>
InProcessCommandBuffer::GetDefaultService() {
  base::AutoLock lock(default_thread_clients_lock_.Get());
  scoped_refptr<Service> service;
  if (!default_thread_clients_.Get().empty()) {
    InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
    service = other->service_;
    DCHECK(service.get());
  } else {
    service = new GpuInProcessThread;
  }
  return service;
}

InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      idle_work_pending_(false),
      last_put_offset_(-1),
      flush_event_(false, false),
      service_(service.get() ? service : GetDefaultService()),
      gpu_thread_weak_ptr_factory_(this) {
  if (!service) {
    base::AutoLock lock(default_thread_clients_lock_.Get());
    default_thread_clients_.Get().insert(this);
  }
}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
  base::AutoLock lock(default_thread_clients_lock_.Get());
  default_thread_clients_.Get().erase(this);
}

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

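// Makes the decoder's context current; on failure the context is marked lost
// and a kLostContext parse error is recorded for the client to observe.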
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}

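// Client-thread entry point: packages the parameters, runs
// InitializeOnGpuThread() via the service's queue, and blocks until it
// completes.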
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group) {
  DCHECK(!share_group || service_ == share_group->service_);
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  if (result) {
    capabilities_ = capabilities;
    capabilities_.map_image =
        capabilities_.map_image && g_gpu_memory_buffer_factory;
  }
  return result;
}

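// Builds the service-side stack (transfer buffers, command buffer, decoder,
// scheduler, surface, and context) on the GPU thread; any failure tears the
// partial state down again via DestroyOnGpuThread().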
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(NULL,
                                    NULL,
                                    NULL,
                                    service_->shader_translator_cache(),
                                    NULL,
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (!surface_) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

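  // With virtualized contexts, all decoders in the share group share one real
  // GL context; this decoder gets a GLContextVirtual layered on top of it.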
  if (service_->UseVirtualizedGLContexts()) {
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }
  *params.capabilities = decoder_->GetCapabilities();

  gpu_control_.reset(
      new GpuControlService(decoder_->GetContextGroup()->image_manager(),
                            decoder_->GetQueryManager()));

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));

  return true;
}

void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}

void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}

void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}

CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
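  // Only advance last_state_ when the flushed state is at least as new; the
  // unsigned subtraction tolerates wraparound of the generation counter.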
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}

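// GPU-thread half of Flush(): executes commands up to |put_offset|, publishes
// the resulting state, and signals |flush_event_| (via ScopedEvent) so client
// waits such as WaitForTokenInRange() can re-check their conditions.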
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    ScheduleIdleWorkOnGpuThread();
  }
}

void InProcessCommandBuffer::PerformIdleWork() {
  CheckSequencedThread();
  idle_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    ScheduleIdleWorkOnGpuThread();
  }
}

void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
  CheckSequencedThread();
  if (idle_work_pending_)
    return;
  idle_work_pending_ = true;
  service_->ScheduleIdleWork(
      base::Bind(&InProcessCommandBuffer::PerformIdleWork,
                 gpu_thread_weak_ptr_));
}

void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}

void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}

void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}

scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGputhread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}

void InProcessCommandBuffer::DestroyTransferBufferOnGputhread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}

gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}

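// Allocates a buffer through the process-wide factory, assigns it a locally
// generated id, and registers it with the GPU-thread GpuControlService.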
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  CheckSequencedThread();

  *id = -1;
  linked_ptr<gfx::GpuMemoryBuffer> buffer =
      make_linked_ptr(g_gpu_memory_buffer_factory->CreateGpuMemoryBuffer(
          width, height, internalformat, usage));
  if (!buffer.get())
    return NULL;

  static int32 next_id = 1;
  *id = next_id++;

  base::Closure task = base::Bind(&GpuControlService::RegisterGpuMemoryBuffer,
                                  base::Unretained(gpu_control_.get()),
                                  *id,
                                  buffer->GetHandle(),
                                  width,
                                  height,
                                  internalformat);

  QueueTask(task);

  gpu_memory_buffers_[*id] = buffer;
  return buffer.get();
}

void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();
  GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
  if (it != gpu_memory_buffers_.end())
    gpu_memory_buffers_.erase(it);
  base::Closure task = base::Bind(&GpuControlService::UnregisterGpuMemoryBuffer,
                                  base::Unretained(gpu_control_.get()),
                                  id);

  QueueTask(task);
}

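// The sync point is generated immediately on the calling thread, but its
// retirement is queued behind all previously queued GPU work, preserving
// ordering.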
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}

void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success)
      mailbox_manager->PushTextureUpdates();
  }
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}

void InProcessCommandBuffer::SignalQuery(unsigned query,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&GpuControlService::SignalQuery,
                       base::Unretained(gpu_control_.get()),
                       query,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}

void InProcessCommandBuffer::Echo(const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}

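// Blocks while the stream texture is created on the GPU thread. Only
// meaningful on Android; other platforms always get stream id 0.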
uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}

uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  return 0;
#endif
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

namespace {

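// Runs |callback| immediately if already on |loop|; otherwise posts it there.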
void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  if (!loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace

base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}

#if defined(OS_ANDROID)
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif

// static
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    GpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;
}

}  // namespace gpu