1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/bind.h"
6 #include "base/bind_helpers.h"
7 #include "base/command_line.h"
8 #include "base/debug/trace_event.h"
9 #include "base/hash.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/time/time.h"
12 #include "build/build_config.h"
13 #include "content/common/gpu/devtools_gpu_instrumentation.h"
14 #include "content/common/gpu/gpu_channel.h"
15 #include "content/common/gpu/gpu_channel_manager.h"
16 #include "content/common/gpu/gpu_command_buffer_stub.h"
17 #include "content/common/gpu/gpu_memory_manager.h"
18 #include "content/common/gpu/gpu_memory_tracking.h"
19 #include "content/common/gpu/gpu_messages.h"
20 #include "content/common/gpu/gpu_watchdog.h"
21 #include "content/common/gpu/image_transport_surface.h"
22 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
23 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
24 #include "content/common/gpu/sync_point_manager.h"
25 #include "content/public/common/content_client.h"
26 #include "gpu/command_buffer/common/constants.h"
27 #include "gpu/command_buffer/common/gles2_cmd_utils.h"
28 #include "gpu/command_buffer/common/mailbox.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gl_state_restorer_impl.h"
31 #include "gpu/command_buffer/service/gpu_control_service.h"
32 #include "gpu/command_buffer/service/image_manager.h"
33 #include "gpu/command_buffer/service/logger.h"
34 #include "gpu/command_buffer/service/mailbox_manager.h"
35 #include "gpu/command_buffer/service/memory_tracking.h"
36 #include "gpu/command_buffer/service/query_manager.h"
37 #include "ui/gl/gl_bindings.h"
38 #include "ui/gl/gl_switches.h"
39
40 #if defined(OS_WIN)
41 #include "content/public/common/sandbox_init.h"
42 #endif
43
44 #if defined(OS_ANDROID)
45 #include "content/common/gpu/stream_texture_android.h"
46 #endif
47
48 namespace content {
49 struct WaitForCommandState {
50 WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
51 : start(start), end(end), reply(reply) {}
52
53 int32 start;
54 int32 end;
55 scoped_ptr<IPC::Message> reply;
56 };
57
58 namespace {
59
60 // The GpuCommandBufferMemoryTracker class provides a bridge between the
61 // ContextGroup's memory type managers and the GpuMemoryManager class.
62 class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
63 public:
64 explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
65 tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
66 CreateTrackingGroup(channel->renderer_pid(), this)) {
67 }
68
69 virtual void TrackMemoryAllocatedChange(
70 size_t old_size,
71 size_t new_size,
72 gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
73 tracking_group_->TrackMemoryAllocatedChange(
74 old_size, new_size, pool);
75 }
76
77 virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
78 return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
79 }
80
81 private:
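// The destructor is private because MemoryTracker instances are reference
// counted; the tracker is destroyed when the last reference is released.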
82 virtual ~GpuCommandBufferMemoryTracker() {
83 }
84 scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
85
86 DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
87 };
88
89 // FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
90 // url_hash matches.
91 void FastSetActiveURL(const GURL& url, size_t url_hash) {
92 // Leave the previously set URL in the empty case -- empty URLs are given by
93 // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
94 // onscreen context URL was set previously and will show up even when a crash
95 // occurs during offscreen command processing.
96 if (url.is_empty())
97 return;
98 static size_t g_last_url_hash = 0;
99 if (url_hash != g_last_url_hash) {
100 g_last_url_hash = url_hash;
101 GetContentClient()->SetActiveURL(url);
102 }
103 }
104
105 // The first time a fence is polled, wait a little extra so other stubs can
106 // process some work first; otherwise the fence timing could settle into a
107 // pattern of alternating fast and slow frames.
108 const int64 kHandleMoreWorkPeriodMs = 2;
109 const int64 kHandleMoreWorkPeriodBusyMs = 1;
110
111 // Prevents idle work from being starved.
112 const int64 kMaxTimeSinceIdleMs = 10;
113
114 } // namespace
115
116 GpuCommandBufferStub::GpuCommandBufferStub(
117 GpuChannel* channel,
118 GpuCommandBufferStub* share_group,
119 const gfx::GLSurfaceHandle& handle,
120 gpu::gles2::MailboxManager* mailbox_manager,
121 gpu::gles2::ImageManager* image_manager,
122 const gfx::Size& size,
123 const gpu::gles2::DisallowedFeatures& disallowed_features,
124 const std::vector<int32>& attribs,
125 gfx::GpuPreference gpu_preference,
126 bool use_virtualized_gl_context,
127 int32 route_id,
128 int32 surface_id,
129 GpuWatchdog* watchdog,
130 bool software,
131 const GURL& active_url)
132 : channel_(channel),
133 handle_(handle),
134 initial_size_(size),
135 disallowed_features_(disallowed_features),
136 requested_attribs_(attribs),
137 gpu_preference_(gpu_preference),
138 use_virtualized_gl_context_(use_virtualized_gl_context),
139 route_id_(route_id),
140 surface_id_(surface_id),
141 software_(software),
142 last_flush_count_(0),
143 last_memory_allocation_valid_(false),
144 watchdog_(watchdog),
145 sync_point_wait_count_(0),
146 delayed_work_scheduled_(false),
147 previous_messages_processed_(0),
148 active_url_(active_url),
149 total_gpu_memory_(0) {
150 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
151 FastSetActiveURL(active_url_, active_url_hash_);
152
153 gpu::gles2::ContextCreationAttribHelper attrib_parser;
154 attrib_parser.Parse(requested_attribs_);
155
156 if (share_group) {
157 context_group_ = share_group->context_group_;
158 DCHECK(context_group_->bind_generates_resource() ==
159 attrib_parser.bind_generates_resource_);
160 } else {
161 context_group_ = new gpu::gles2::ContextGroup(
162 mailbox_manager,
163 image_manager,
164 new GpuCommandBufferMemoryTracker(channel),
165 channel_->gpu_channel_manager()->shader_translator_cache(),
166 NULL,
167 attrib_parser.bind_generates_resource_);
168 }
169
170 use_virtualized_gl_context_ |=
171 context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
172 }
173
174 GpuCommandBufferStub::~GpuCommandBufferStub() {
175 Destroy();
176
177 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
178 gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
179 }
180
181 GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
182 return channel()->gpu_channel_manager()->gpu_memory_manager();
183 }
184
185 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
186 devtools_gpu_instrumentation::ScopedGpuTask task(channel());
187 FastSetActiveURL(active_url_, active_url_hash_);
188
189 bool have_context = false;
190 // Ensure the appropriate GL context is current before handling any IPC
191 // messages directed at the command buffer. This ensures that the message
192 // handler can assume that the context is current (not necessary for Echo,
193 // WaitForTokenInRange, WaitForGetOffsetInRange, RetireSyncPoint, or SetLatencyInfo).
194 if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
195 message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
196 message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
197 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
198 message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
199 if (!MakeCurrent())
200 return false;
201 have_context = true;
202 }
203
204 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
205 // here. This is so the reply can be delayed if the scheduler is unscheduled.
206 bool handled = true;
207 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
208 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
209 OnInitialize);
210 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
211 OnSetGetBuffer);
212 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
213 OnProduceFrontBuffer);
214 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
215 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
216 OnWaitForTokenInRange);
217 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
218 OnWaitForGetOffsetInRange);
219 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
220 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
221 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
222 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
223 OnRegisterTransferBuffer);
224 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
225 OnDestroyTransferBuffer);
226 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
227 OnCreateVideoDecoder)
228 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
229 OnCreateVideoEncoder)
230 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
231 OnSetSurfaceVisible)
232 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
233 OnRetireSyncPoint)
234 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
235 OnSignalSyncPoint)
236 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
237 OnSignalQuery)
238 IPC_MESSAGE_HANDLER(
239 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
240 OnSetClientHasMemoryAllocationChangedCallback)
241 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
242 OnRegisterGpuMemoryBuffer);
243 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyGpuMemoryBuffer,
244 OnDestroyGpuMemoryBuffer);
245 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
246 OnCreateStreamTexture)
247 IPC_MESSAGE_UNHANDLED(handled = false)
248 IPC_END_MESSAGE_MAP()
249
250 CheckCompleteWaits();
251
252 if (have_context) {
253 // Ensure that any delayed work that was created will be handled.
254 ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
255 }
256
257 DCHECK(handled);
258 return handled;
259 }
260
261 bool GpuCommandBufferStub::Send(IPC::Message* message) {
262 return channel_->Send(message);
263 }
264
265 bool GpuCommandBufferStub::IsScheduled() {
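// Before initialization there is no scheduler, so report the stub as scheduled.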
266 return (!scheduler_.get() || scheduler_->IsScheduled());
267 }
268
269 bool GpuCommandBufferStub::HasMoreWork() {
270 return scheduler_.get() && scheduler_->HasMoreWork();
271 }
272
273 void GpuCommandBufferStub::PollWork() {
274 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
275 delayed_work_scheduled_ = false;
276 FastSetActiveURL(active_url_, active_url_hash_);
277 if (decoder_.get() && !MakeCurrent())
278 return;
279
280 if (scheduler_) {
281 bool fences_complete = scheduler_->PollUnscheduleFences();
282 // Perform idle work if all fences are complete.
283 if (fences_complete) {
284 uint64 current_messages_processed =
285 channel()->gpu_channel_manager()->MessagesProcessed();
286 // We're idle if no messages were processed since the last poll and none are scheduled.
287 bool is_idle =
288 (previous_messages_processed_ == current_messages_processed) &&
289 !channel()->gpu_channel_manager()->HandleMessagesScheduled();
290 if (!is_idle && !last_idle_time_.is_null()) {
291 base::TimeDelta time_since_idle = base::TimeTicks::Now() -
292 last_idle_time_;
293 base::TimeDelta max_time_since_idle =
294 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
295
296 // Force idle when it's been too long since last time we were idle.
297 if (time_since_idle > max_time_since_idle)
298 is_idle = true;
299 }
300
301 if (is_idle) {
302 last_idle_time_ = base::TimeTicks::Now();
303 scheduler_->PerformIdleWork();
304 }
305 }
306 }
307 ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
308 }
309
310 bool GpuCommandBufferStub::HasUnprocessedCommands() {
311 if (command_buffer_) {
312 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
313 return state.put_offset != state.get_offset &&
314 !gpu::error::IsError(state.error);
315 }
316 return false;
317 }
318
319 void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
320 if (!HasMoreWork()) {
321 last_idle_time_ = base::TimeTicks();
322 return;
323 }
324
325 if (delayed_work_scheduled_)
326 return;
327 delayed_work_scheduled_ = true;
328
329 // We will be considered idle if no messages are processed between now and
330 // when PollWork is called.
331 previous_messages_processed_ =
332 channel()->gpu_channel_manager()->MessagesProcessed();
333 if (last_idle_time_.is_null())
334 last_idle_time_ = base::TimeTicks::Now();
335
336 // IsScheduled() returns true after passing all unschedule fences
337 // and this is when we can start performing idle work. Idle work
338 // is done synchronously so we can set delay to 0 and instead poll
339 // for more work at the rate idle work is performed. This also ensures
340 // that idle work is done as efficiently as possible without any
341 // unnecessary delays.
342 if (scheduler_.get() &&
343 scheduler_->IsScheduled() &&
344 scheduler_->HasMoreIdleWork()) {
345 delay = 0;
346 }
347
348 base::MessageLoop::current()->PostDelayedTask(
349 FROM_HERE,
350 base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
351 base::TimeDelta::FromMilliseconds(delay));
352 }
353
354 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
355 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
356 Send(new IPC::Message(message));
357 }
358
359 bool GpuCommandBufferStub::MakeCurrent() {
360 if (decoder_->MakeCurrent())
361 return true;
362 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
363 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
364 command_buffer_->SetParseError(gpu::error::kLostContext);
365 CheckContextLost();
366 return false;
367 }
368
369 void GpuCommandBufferStub::Destroy() {
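// Release any pending delayed replies first so a client blocked in
// WaitForTokenInRange or WaitForGetOffsetInRange is not left waiting on a
// destroyed stub.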
370 if (wait_for_token_) {
371 Send(wait_for_token_->reply.release());
372 wait_for_token_.reset();
373 }
374 if (wait_for_get_offset_) {
375 Send(wait_for_get_offset_->reply.release());
376 wait_for_get_offset_.reset();
377 }
378 if (handle_.is_null() && !active_url_.is_empty()) {
379 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
380 gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
381 active_url_));
382 }
383
384 memory_manager_client_state_.reset();
385
386 while (!sync_points_.empty())
387 OnRetireSyncPoint(sync_points_.front());
388
389 if (decoder_)
390 decoder_->set_engine(NULL);
391
392 // The scheduler has raw references to the decoder and the command buffer so
393 // destroy it before those.
394 scheduler_.reset();
395
396 bool have_context = false;
397 if (decoder_ && command_buffer_ &&
398 command_buffer_->GetLastState().error != gpu::error::kLostContext)
399 have_context = decoder_->MakeCurrent();
400 FOR_EACH_OBSERVER(DestructionObserver,
401 destruction_observers_,
402 OnWillDestroyStub());
403
404 if (decoder_) {
405 decoder_->Destroy(have_context);
406 decoder_.reset();
407 }
408
409 command_buffer_.reset();
410
411 // Remove this after crbug.com/248395 is sorted out.
412 surface_ = NULL;
413 }
414
415 void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
416 Destroy();
417 GpuCommandBufferMsg_Initialize::WriteReplyParams(
418 reply_message, false, gpu::Capabilities());
419 Send(reply_message);
420 }
421
422 void GpuCommandBufferStub::OnInitialize(
423 base::SharedMemoryHandle shared_state_handle,
424 IPC::Message* reply_message) {
425 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
426 DCHECK(!command_buffer_.get());
427
428 scoped_ptr<base::SharedMemory> shared_state_shm(
429 new base::SharedMemory(shared_state_handle, false));
430
431 command_buffer_.reset(new gpu::CommandBufferService(
432 context_group_->transfer_buffer_manager()));
433
434 bool result = command_buffer_->Initialize();
435 DCHECK(result);
436
437 decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
438
439 scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
440 decoder_.get(),
441 decoder_.get()));
442 if (preemption_flag_.get())
443 scheduler_->SetPreemptByFlag(preemption_flag_);
444
445 decoder_->set_engine(scheduler_.get());
446
447 if (!handle_.is_null()) {
448 #if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
449 if (software_) {
450 LOG(ERROR) << "No software support.";
451 OnInitializeFailed(reply_message);
452 return;
453 }
454 #endif
455
456 surface_ = ImageTransportSurface::CreateSurface(
457 channel_->gpu_channel_manager(),
458 this,
459 handle_);
460 } else {
461 GpuChannelManager* manager = channel_->gpu_channel_manager();
462 surface_ = manager->GetDefaultOffscreenSurface();
463 }
464
465 if (!surface_.get()) {
466 DLOG(ERROR) << "Failed to create surface.";
467 OnInitializeFailed(reply_message);
468 return;
469 }
470
471 scoped_refptr<gfx::GLContext> context;
472 if (use_virtualized_gl_context_ && channel_->share_group()) {
473 context = channel_->share_group()->GetSharedContext();
474 if (!context.get()) {
475 context = gfx::GLContext::CreateGLContext(
476 channel_->share_group(),
477 channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
478 gpu_preference_);
479 if (!context.get()) {
480 DLOG(ERROR) << "Failed to create shared context for virtualization.";
481 OnInitializeFailed(reply_message);
482 return;
483 }
484 channel_->share_group()->SetSharedContext(context.get());
485 }
486 // This should be a non-virtual GL context.
487 DCHECK(context->GetHandle());
488 context = new gpu::GLContextVirtual(
489 channel_->share_group(), context.get(), decoder_->AsWeakPtr());
490 if (!context->Initialize(surface_.get(), gpu_preference_)) {
491 // TODO(sievers): The real context created above for the default
492 // offscreen surface might not be compatible with this surface.
493 // Need to adjust at least GLX to be able to create the initial context
494 // with a config that is compatible with onscreen and offscreen surfaces.
495 context = NULL;
496
497 DLOG(ERROR) << "Failed to initialize virtual GL context.";
498 OnInitializeFailed(reply_message);
499 return;
500 }
501 }
502 if (!context.get()) {
503 context = gfx::GLContext::CreateGLContext(
504 channel_->share_group(), surface_.get(), gpu_preference_);
505 }
506 if (!context.get()) {
507 DLOG(ERROR) << "Failed to create context.";
508 OnInitializeFailed(reply_message);
509 return;
510 }
511
512 if (!context->MakeCurrent(surface_.get())) {
513 LOG(ERROR) << "Failed to make context current.";
514 OnInitializeFailed(reply_message);
515 return;
516 }
517
518 if (!context->GetGLStateRestorer()) {
519 context->SetGLStateRestorer(
520 new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
521 }
522
523 if (!context->GetTotalGpuMemory(&total_gpu_memory_))
524 total_gpu_memory_ = 0;
525
526 if (!context_group_->has_program_cache()) {
527 context_group_->set_program_cache(
528 channel_->gpu_channel_manager()->program_cache());
529 }
530
531 // Initialize the decoder with either the view or pbuffer GLContext.
532 if (!decoder_->Initialize(surface_,
533 context,
534 !surface_id(),
535 initial_size_,
536 disallowed_features_,
537 requested_attribs_)) {
538 DLOG(ERROR) << "Failed to initialize decoder.";
539 OnInitializeFailed(reply_message);
540 return;
541 }
542
543 gpu_control_service_.reset(
544 new gpu::GpuControlService(context_group_->image_manager(), NULL));
545
546 if (CommandLine::ForCurrentProcess()->HasSwitch(
547 switches::kEnableGPUServiceLogging)) {
548 decoder_->set_log_commands(true);
549 }
550
551 decoder_->GetLogger()->SetMsgCallback(
552 base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
553 base::Unretained(this)));
554 decoder_->SetShaderCacheCallback(
555 base::Bind(&GpuCommandBufferStub::SendCachedShader,
556 base::Unretained(this)));
557 decoder_->SetWaitSyncPointCallback(
558 base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
559 base::Unretained(this)));
560
561 command_buffer_->SetPutOffsetChangeCallback(
562 base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
563 command_buffer_->SetGetBufferChangeCallback(
564 base::Bind(&gpu::GpuScheduler::SetGetBuffer,
565 base::Unretained(scheduler_.get())));
566 command_buffer_->SetParseErrorCallback(
567 base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
568 scheduler_->SetSchedulingChangedCallback(
569 base::Bind(&GpuChannel::StubSchedulingChanged,
570 base::Unretained(channel_)));
571
572 if (watchdog_) {
573 scheduler_->SetCommandProcessedCallback(
574 base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
575 base::Unretained(this)));
576 }
577
578 const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
579 if (!shared_state_shm->Map(kSharedStateSize)) {
580 DLOG(ERROR) << "Failed to map shared state buffer.";
581 OnInitializeFailed(reply_message);
582 return;
583 }
584 command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
585 shared_state_shm.Pass(), kSharedStateSize));
586
587 GpuCommandBufferMsg_Initialize::WriteReplyParams(
588 reply_message, true, decoder_->GetCapabilities());
589 Send(reply_message);
590
591 if (handle_.is_null() && !active_url_.is_empty()) {
592 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
593 gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
594 active_url_));
595 }
596 }
597
598 void GpuCommandBufferStub::OnSetLatencyInfo(
599 const std::vector<ui::LatencyInfo>& latency_info) {
600 if (!ui::LatencyInfo::Verify(latency_info,
601 "GpuCommandBufferStub::OnSetLatencyInfo"))
602 return;
603 if (!latency_info_callback_.is_null())
604 latency_info_callback_.Run(latency_info);
605 }
606
607 void GpuCommandBufferStub::OnCreateStreamTexture(
608 uint32 texture_id, int32 stream_id, bool* succeeded) {
609 #if defined(OS_ANDROID)
610 *succeeded = StreamTexture::Create(this, texture_id, stream_id);
611 #else
612 *succeeded = false;
613 #endif
614 }
615
616 void GpuCommandBufferStub::SetLatencyInfoCallback(
617 const LatencyInfoCallback& callback) {
618 latency_info_callback_ = callback;
619 }
620
621 int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
622 // The requested attributes are stored as (enum, value) pairs; find the
623 // requested attribute and return the value that follows it, or -1 if absent.
624 for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
625 it != requested_attribs_.end(); ++it) {
626 if (*it++ == attr) {
627 return *it;
628 }
629 }
630 return -1;
631 }
632
633 void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
634 IPC::Message* reply_message) {
635 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
636 if (command_buffer_)
637 command_buffer_->SetGetBuffer(shm_id);
638 Send(reply_message);
639 }
640
641 void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
642 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
643 if (!decoder_) {
644 LOG(ERROR) << "Can't produce front buffer before initialization.";
645 return;
646 }
647
648 decoder_->ProduceFrontBuffer(mailbox);
649 }
650
651 void GpuCommandBufferStub::OnParseError() {
652 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
653 DCHECK(command_buffer_.get());
654 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
655 IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
656 route_id_, state.context_lost_reason);
657 msg->set_unblock(true);
658 Send(msg);
659
660 // Tell the browser about this context loss as well, so it can
661 // determine whether client APIs like WebGL need to be immediately
662 // blocked from automatically running.
663 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
664 gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
665 handle_.is_null(), state.context_lost_reason, active_url_));
666
667 CheckContextLost();
668 }
669
670 void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
671 int32 end,
672 IPC::Message* reply_message) {
673 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
674 DCHECK(command_buffer_.get());
675 CheckContextLost();
676 if (wait_for_token_)
677 LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
678 wait_for_token_ =
679 make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
680 CheckCompleteWaits();
681 }
682
683 void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
684 int32 start,
685 int32 end,
686 IPC::Message* reply_message) {
687 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
688 DCHECK(command_buffer_.get());
689 CheckContextLost();
690 if (wait_for_get_offset_) {
691 LOG(ERROR)
692 << "Got WaitForGetOffset command while currently waiting for offset.";
693 }
694 wait_for_get_offset_ =
695 make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
696 CheckCompleteWaits();
697 }
698
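// Replies to any pending WaitForTokenInRange / WaitForGetOffsetInRange
// request once the last known command buffer state satisfies it, or once an
// error has been recorded.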
699 void GpuCommandBufferStub::CheckCompleteWaits() {
700 if (wait_for_token_ || wait_for_get_offset_) {
701 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
702 if (wait_for_token_ &&
703 (gpu::CommandBuffer::InRange(
704 wait_for_token_->start, wait_for_token_->end, state.token) ||
705 state.error != gpu::error::kNoError)) {
706 ReportState();
707 GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
708 wait_for_token_->reply.get(), state);
709 Send(wait_for_token_->reply.release());
710 wait_for_token_.reset();
711 }
712 if (wait_for_get_offset_ &&
713 (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
714 wait_for_get_offset_->end,
715 state.get_offset) ||
716 state.error != gpu::error::kNoError)) {
717 ReportState();
718 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
719 wait_for_get_offset_->reply.get(), state);
720 Send(wait_for_get_offset_->reply.release());
721 wait_for_get_offset_.reset();
722 }
723 }
724 }
725
726 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
727 TRACE_EVENT1(
728 "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
729 DCHECK(command_buffer_.get());
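// The flush count is unsigned and may wrap, so a forward distance smaller
// than 0x8000000 is treated as "newer"; this keeps the ordering check
// wraparound-safe.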
730 if (flush_count - last_flush_count_ < 0x8000000U) {
731 last_flush_count_ = flush_count;
732 command_buffer_->Flush(put_offset);
733 } else {
734 // We received this message out of order. This should not happen; the
735 // NOTREACHED below is here to catch regressions. Ignore the message.
736 NOTREACHED() << "Received a Flush message out-of-order";
737 }
738
739 ReportState();
740 }
741
742 void GpuCommandBufferStub::OnRescheduled() {
743 gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
744 command_buffer_->Flush(pre_state.put_offset);
745 gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();
746
747 if (pre_state.get_offset != post_state.get_offset)
748 ReportState();
749 }
750
751 void GpuCommandBufferStub::OnRegisterTransferBuffer(
752 int32 id,
753 base::SharedMemoryHandle transfer_buffer,
754 uint32 size) {
755 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
756
757 // Take ownership of the memory and map it into this process.
758 // This validates the size.
759 scoped_ptr<base::SharedMemory> shared_memory(
760 new base::SharedMemory(transfer_buffer, false));
761 if (!shared_memory->Map(size)) {
762 DVLOG(0) << "Failed to map shared memory.";
763 return;
764 }
765
766 if (command_buffer_) {
767 command_buffer_->RegisterTransferBuffer(
768 id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
769 }
770 }
771
772 void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
773 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");
774
775 if (command_buffer_)
776 command_buffer_->DestroyTransferBuffer(id);
777 }
778
779 void GpuCommandBufferStub::OnCommandProcessed() {
780 if (watchdog_)
781 watchdog_->CheckArmed();
782 }
783
784 void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
785
786 void GpuCommandBufferStub::PutChanged() {
787 FastSetActiveURL(active_url_, active_url_hash_);
788 scheduler_->PutChanged();
789 }
790
791 void GpuCommandBufferStub::OnCreateVideoDecoder(
792 media::VideoCodecProfile profile,
793 int32 decoder_route_id,
794 IPC::Message* reply_message) {
795 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
796 GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
797 decoder_route_id, this, channel_->io_message_loop());
798 decoder->Initialize(profile, reply_message);
799 // The decoder is registered as a DestructionObserver of this stub and
800 // deletes itself when the stub is destroyed.
801 }
802
803 void GpuCommandBufferStub::OnCreateVideoEncoder(
804 media::VideoFrame::Format input_format,
805 const gfx::Size& input_visible_size,
806 media::VideoCodecProfile output_profile,
807 uint32 initial_bitrate,
808 int32 encoder_route_id,
809 IPC::Message* reply_message) {
810 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
811 GpuVideoEncodeAccelerator* encoder =
812 new GpuVideoEncodeAccelerator(encoder_route_id, this);
813 encoder->Initialize(input_format,
814 input_visible_size,
815 output_profile,
816 initial_bitrate,
817 reply_message);
818 // The encoder is registered as a DestructionObserver of this stub and
819 // deletes itself when the stub is destroyed.
820 }
821
822 void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
823 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
824 if (memory_manager_client_state_)
825 memory_manager_client_state_->SetVisible(visible);
826 }
827
828 void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
829 sync_points_.push_back(sync_point);
830 }
831
832 void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
833 DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
834 sync_points_.pop_front();
835 if (context_group_->mailbox_manager()->UsesSync() && MakeCurrent())
836 context_group_->mailbox_manager()->PushTextureUpdates();
837 GpuChannelManager* manager = channel_->gpu_channel_manager();
838 manager->sync_point_manager()->RetireSyncPoint(sync_point);
839 }
840
841 bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
842 if (!sync_point)
843 return true;
844 GpuChannelManager* manager = channel_->gpu_channel_manager();
845 if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
846 return true;
847
848 if (sync_point_wait_count_ == 0) {
849 TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
850 "GpuCommandBufferStub", this);
851 }
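// Deschedule this stub until the sync point is retired; OnSyncPointRetired()
// will reschedule it.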
852 scheduler_->SetScheduled(false);
853 ++sync_point_wait_count_;
854 manager->sync_point_manager()->AddSyncPointCallback(
855 sync_point,
856 base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
857 this->AsWeakPtr()));
858 return scheduler_->IsScheduled();
859 }
860
861 void GpuCommandBufferStub::OnSyncPointRetired() {
862 --sync_point_wait_count_;
863 if (sync_point_wait_count_ == 0) {
864 TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
865 "GpuCommandBufferStub", this);
866 }
867 scheduler_->SetScheduled(true);
868 }
869
870 void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
871 GpuChannelManager* manager = channel_->gpu_channel_manager();
872 manager->sync_point_manager()->AddSyncPointCallback(
873 sync_point,
874 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
875 this->AsWeakPtr(),
876 id));
877 }
878
879 void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
880 Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
881 }
882
883 void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
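// Prefer to defer the ack until the query completes; if the decoder, query
// manager, or query is unavailable, fall through and ack immediately.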
884 if (decoder_) {
885 gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
886 if (query_manager) {
887 gpu::gles2::QueryManager::Query* query =
888 query_manager->GetQuery(query_id);
889 if (query) {
890 query->AddCallback(
891 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
892 this->AsWeakPtr(),
893 id));
894 return;
895 }
896 }
897 }
898 // Something went wrong; run the callback immediately.
899 OnSignalSyncPointAck(id);
900 }
901
902
903 void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
904 bool has_callback) {
905 TRACE_EVENT0(
906 "gpu",
907 "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
908 if (has_callback) {
909 if (!memory_manager_client_state_) {
910 memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
911 this, surface_id_ != 0, true));
912 }
913 } else {
914 memory_manager_client_state_.reset();
915 }
916 }
917
918 void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
919 int32 id,
920 gfx::GpuMemoryBufferHandle gpu_memory_buffer,
921 uint32 width,
922 uint32 height,
923 uint32 internalformat) {
924 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
925 #if defined(OS_ANDROID)
926 // Verify that the renderer is not trying to use a surface texture it doesn't own.
927 if (gpu_memory_buffer.type == gfx::SURFACE_TEXTURE_BUFFER &&
928 gpu_memory_buffer.surface_texture_id.secondary_id !=
929 channel()->client_id()) {
930 LOG(ERROR) << "Illegal surface texture ID for renderer.";
931 return;
932 }
933 #endif
934 if (gpu_control_service_) {
935 gpu_control_service_->RegisterGpuMemoryBuffer(
936 id, gpu_memory_buffer, width, height, internalformat);
937 }
938 }
939
940 void GpuCommandBufferStub::OnDestroyGpuMemoryBuffer(int32 id) {
941 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyGpuMemoryBuffer");
942 if (gpu_control_service_)
943 gpu_control_service_->UnregisterGpuMemoryBuffer(id);
944 }
945
946 void GpuCommandBufferStub::SendConsoleMessage(
947 int32 id,
948 const std::string& message) {
949 GPUCommandBufferConsoleMessage console_message;
950 console_message.id = id;
951 console_message.message = message;
952 IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
953 route_id_, console_message);
954 msg->set_unblock(true);
955 Send(msg);
956 }
957
958 void GpuCommandBufferStub::SendCachedShader(
959 const std::string& key, const std::string& shader) {
960 channel_->CacheShader(key, shader);
961 }
962
963 void GpuCommandBufferStub::AddDestructionObserver(
964 DestructionObserver* observer) {
965 destruction_observers_.AddObserver(observer);
966 }
967
968 void GpuCommandBufferStub::RemoveDestructionObserver(
969 DestructionObserver* observer) {
970 destruction_observers_.RemoveObserver(observer);
971 }
972
973 void GpuCommandBufferStub::SetPreemptByFlag(
974 scoped_refptr<gpu::PreemptionFlag> flag) {
975 preemption_flag_ = flag;
976 if (scheduler_)
977 scheduler_->SetPreemptByFlag(preemption_flag_);
978 }
979
980 bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
981 *bytes = total_gpu_memory_;
982 return !!total_gpu_memory_;
983 }
984
985 gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
986 if (!surface_.get())
987 return gfx::Size();
988 return surface_->GetSize();
989 }
990
991 gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
992 return context_group_->memory_tracker();
993 }
994
995 void GpuCommandBufferStub::SetMemoryAllocation(
996 const gpu::MemoryAllocation& allocation) {
997 if (!last_memory_allocation_valid_ ||
998 !allocation.Equals(last_memory_allocation_)) {
999 Send(new GpuCommandBufferMsg_SetMemoryAllocation(
1000 route_id_, allocation));
1001 }
1002
1003 last_memory_allocation_valid_ = true;
1004 last_memory_allocation_ = allocation;
1005 }
1006
1007 void GpuCommandBufferStub::SuggestHaveFrontBuffer(
1008 bool suggest_have_frontbuffer) {
1009 // This can be called outside of OnMessageReceived, so the context needs
1010 // to be made current before calling methods on the surface.
1011 if (surface_.get() && MakeCurrent())
1012 surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
1013 }
1014
1015 bool GpuCommandBufferStub::CheckContextLost() {
1016 DCHECK(command_buffer_);
1017 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
1018 bool was_lost = state.error == gpu::error::kLostContext;
1019 // Lose all other contexts if the reset was triggered by the robustness
1020 // extension instead of being synthetic.
1021 if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
1022 (gfx::GLContext::LosesAllContextsOnContextLost() ||
1023 use_virtualized_gl_context_))
1024 channel_->LoseAllContexts();
1025 CheckCompleteWaits();
1026 return was_lost;
1027 }
1028
1029 void GpuCommandBufferStub::MarkContextLost() {
1030 if (!command_buffer_ ||
1031 command_buffer_->GetLastState().error == gpu::error::kLostContext)
1032 return;
1033
1034 command_buffer_->SetContextLostReason(gpu::error::kUnknown);
1035 if (decoder_)
1036 decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
1037 command_buffer_->SetParseError(gpu::error::kLostContext);
1038 }
1039
1040 uint64 GpuCommandBufferStub::GetMemoryUsage() const {
1041 return GetMemoryManager()->GetClientMemoryUsage(this);
1042 }
1043
1044 } // namespace content
1045