// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"

#include <list>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
#include "gpu/command_buffer/service/safe_shared_memory_pool.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
#include "ui/gl/scoped_binders.h"

namespace gpu {

namespace {

const char kAsyncTransferThreadName[] = "AsyncTransferThread";

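// Runs on the transfer thread after all previously queued transfers. Swaps in
// the duplicated shared memory handle before notifying the observer.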
void PerformNotifyCompletion(
    AsyncMemoryParams mem_params,
    ScopedSafeSharedMemory* safe_shared_memory,
    scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
  TRACE_EVENT0("gpu", "PerformNotifyCompletion");
  AsyncMemoryParams safe_mem_params = mem_params;
  safe_mem_params.shared_memory = safe_shared_memory->shared_memory();
  observer->DidComplete(safe_mem_params);
}

// TODO(backer): Factor out common thread scheduling logic from the EGL and
// ShareGroup implementations. http://crbug.com/239889
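// Dedicated upload thread. It creates a small offscreen surface and a context
// in the decoder context's share group, so textures uploaded here are visible
// to the decoder.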
class TransferThread : public base::Thread {
 public:
  TransferThread()
      : base::Thread(kAsyncTransferThreadName),
        initialized_(false) {
    Start();
#if defined(OS_ANDROID) || defined(OS_LINUX)
    SetPriority(base::kThreadPriority_Background);
#endif
  }

  virtual ~TransferThread() {
    // The only instance of this class was declared leaky.
    NOTREACHED();
  }

  void InitializeOnMainThread(gfx::GLContext* parent_context) {
    TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
    if (initialized_)
      return;

    base::WaitableEvent wait_for_init(true, false);
    message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(&TransferThread::InitializeOnTransferThread,
                   base::Unretained(this),
                   base::Unretained(parent_context),
                   &wait_for_init));
    wait_for_init.Wait();
  }

  virtual void CleanUp() OVERRIDE {
    surface_ = NULL;
    context_ = NULL;
  }

  SafeSharedMemoryPool* safe_shared_memory_pool() {
    return &safe_shared_memory_pool_;
  }

 private:
  bool initialized_;

  scoped_refptr<gfx::GLSurface> surface_;
  scoped_refptr<gfx::GLContext> context_;
  SafeSharedMemoryPool safe_shared_memory_pool_;

  void InitializeOnTransferThread(gfx::GLContext* parent_context,
                                  base::WaitableEvent* caller_wait) {
    TRACE_EVENT0("gpu", "InitializeOnTransferThread");

    if (!parent_context) {
      LOG(ERROR) << "No parent context provided.";
      caller_wait->Signal();
      return;
    }

    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
    if (!surface_.get()) {
      LOG(ERROR) << "Unable to create GLSurface";
      caller_wait->Signal();
      return;
    }

    // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
    // we would probably want to use a PBO texture upload for a true async
    // upload (that would hopefully be optimized as a DMA transfer by the
    // driver).
    context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
                                               surface_.get(),
                                               gfx::PreferIntegratedGpu);
    if (!context_.get()) {
      LOG(ERROR) << "Unable to create GLContext.";
      caller_wait->Signal();
      return;
    }

    context_->MakeCurrent(surface_.get());
    initialized_ = true;
    caller_wait->Signal();
  }

  DISALLOW_COPY_AND_ASSIGN(TransferThread);
};

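// Process-wide transfer thread, intentionally leaked (see ~TransferThread).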
base::LazyInstance<TransferThread>::Leaky
    g_transfer_thread = LAZY_INSTANCE_INITIALIZER;

base::MessageLoopProxy* transfer_message_loop_proxy() {
  return g_transfer_thread.Pointer()->message_loop_proxy().get();
}

SafeSharedMemoryPool* safe_shared_memory_pool() {
  return g_transfer_thread.Pointer()->safe_shared_memory_pool();
}

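// A ref-counted upload task that may be claimed by either thread: the
// transfer thread via BindAndRun(), or the main thread via TryRun() when the
// result is needed immediately. The lock plus resetting the closure ensure
// the task runs at most once.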
class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
 public:
  explicit PendingTask(const base::Closure& task)
      : task_(task), task_pending_(true, false) {}

  bool TryRun() {
    // This is meant to be called on the main thread where the texture
    // is already bound.
    DCHECK(checker_.CalledOnValidThread());
    if (task_lock_.Try()) {
      // Only run once.
      if (!task_.is_null())
        task_.Run();
      task_.Reset();

      task_lock_.Release();
      task_pending_.Signal();
      return true;
    }
    return false;
  }

  void BindAndRun(GLuint texture_id) {
    // This is meant to be called on the upload thread where we don't have to
    // restore the previous texture binding.
    DCHECK(!checker_.CalledOnValidThread());
    base::AutoLock locked(task_lock_);
    if (!task_.is_null()) {
      glBindTexture(GL_TEXTURE_2D, texture_id);
      task_.Run();
      task_.Reset();
      glBindTexture(GL_TEXTURE_2D, 0);
      // Flush for synchronization between threads.
      glFlush();
      task_pending_.Signal();
    }
  }

  void Cancel() {
    base::AutoLock locked(task_lock_);
    task_.Reset();
    task_pending_.Signal();
  }

  bool TaskIsInProgress() {
    return !task_pending_.IsSignaled();
  }

  void WaitForTask() {
    task_pending_.Wait();
  }

 private:
  friend class base::RefCountedThreadSafe<PendingTask>;

  virtual ~PendingTask() {}

  base::ThreadChecker checker_;

  base::Lock task_lock_;
  base::Closure task_;
  base::WaitableEvent task_pending_;

  DISALLOW_COPY_AND_ASSIGN(PendingTask);
};

// Class which holds async pixel transfers state.
// The texture_id is accessed by either thread, but everything
// else is accessed only on the main thread.
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  TransferStateInternal(GLuint texture_id,
                        const AsyncTexImage2DParams& define_params)
      : texture_id_(texture_id), define_params_(define_params) {}

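  // A transfer is in progress until its pending task has either run or been
  // cancelled.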
  bool TransferIsInProgress() {
    return pending_upload_task_.get() &&
           pending_upload_task_->TaskIsInProgress();
  }

  void BindTransfer() {
    TRACE_EVENT2("gpu", "BindAsyncTransfer",
                 "width", define_params_.width,
                 "height", define_params_.height);
    DCHECK(texture_id_);

    glBindTexture(GL_TEXTURE_2D, texture_id_);
    bind_callback_.Run();
  }

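  // Runs the pending upload on the calling (main) thread if the transfer
  // thread has not claimed it yet; otherwise blocks until it finishes.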
  void WaitForTransferCompletion() {
    TRACE_EVENT0("gpu", "WaitForTransferCompletion");
    DCHECK(pending_upload_task_.get());
    if (!pending_upload_task_->TryRun()) {
      pending_upload_task_->WaitForTask();
    }
    pending_upload_task_ = NULL;
  }

  void CancelUpload() {
    TRACE_EVENT0("gpu", "CancelUpload");
    if (pending_upload_task_.get())
      pending_upload_task_->Cancel();
    pending_upload_task_ = NULL;
  }

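  // Posts the define/upload to the transfer thread. The main thread may still
  // run it itself later via WaitForTransferCompletion().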
  void ScheduleAsyncTexImage2D(
      const AsyncTexImage2DParams tex_params,
      const AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
      const base::Closure& bind_callback) {
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexImage2D,
        this,
        tex_params,
        mem_params,
        // Duplicate the shared memory so there is no way we can get
        // a use-after-free of the raw pixels.
        base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                               mem_params.shared_memory,
                                               mem_params.shm_size)),
        texture_upload_stats));
    transfer_message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(
            &PendingTask::BindAndRun, pending_upload_task_, texture_id_));

    // Save the late bind callback, so we can notify the client when it is
    // bound.
    bind_callback_ = bind_callback;
  }

  void ScheduleAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexSubImage2D,
        this,
        tex_params,
        mem_params,
        base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                               mem_params.shared_memory,
                                               mem_params.shm_size)),
        texture_upload_stats));
    transfer_message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(
            &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
  }

 private:
  friend class base::RefCountedThreadSafe<TransferStateInternal>;

  virtual ~TransferStateInternal() {
  }

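  // Runs with texture_id_ already bound, either on the transfer thread via
  // PendingTask::BindAndRun() or on the main thread via PendingTask::TryRun().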
  void PerformAsyncTexImage2D(
      AsyncTexImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      ScopedSafeSharedMemory* safe_shared_memory,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexImage",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    void* data =
        AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);

    {
      TRACE_EVENT0("gpu", "glTexImage2D");
      glTexImage2D(GL_TEXTURE_2D,
                   tex_params.level,
                   tex_params.internal_format,
                   tex_params.width,
                   tex_params.height,
                   tex_params.border,
                   tex_params.format,
                   tex_params.type,
                   data);
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

  void PerformAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      ScopedSafeSharedMemory* safe_shared_memory,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexSubImage2D",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    void* data =
        AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);

    {
      TRACE_EVENT0("gpu", "glTexSubImage2D");
      glTexSubImage2D(GL_TEXTURE_2D,
                      tex_params.level,
                      tex_params.xoffset,
                      tex_params.yoffset,
                      tex_params.width,
                      tex_params.height,
                      tex_params.format,
                      tex_params.type,
                      data);
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

  scoped_refptr<PendingTask> pending_upload_task_;

  GLuint texture_id_;

  // Definition params for texture that needs binding.
  AsyncTexImage2DParams define_params_;

  // Callback to invoke when AsyncTexImage2D is complete
  // and the client can safely use the texture. This occurs
  // during BindCompletedAsyncTransfers().
  base::Closure bind_callback_;
};

}  // namespace

class AsyncPixelTransferDelegateShareGroup
    : public AsyncPixelTransferDelegate,
      public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
 public:
  AsyncPixelTransferDelegateShareGroup(
      AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
      GLuint texture_id,
      const AsyncTexImage2DParams& define_params);
  virtual ~AsyncPixelTransferDelegateShareGroup();

  void BindTransfer() { state_->BindTransfer(); }

  // Implement AsyncPixelTransferDelegate:
  virtual void AsyncTexImage2D(
      const AsyncTexImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params,
      const base::Closure& bind_callback) OVERRIDE;
  virtual void AsyncTexSubImage2D(
      const AsyncTexSubImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params) OVERRIDE;
  virtual bool TransferIsInProgress() OVERRIDE;
  virtual void WaitForTransferCompletion() OVERRIDE;

 private:
  // A raw pointer is safe because the SharedState is owned by the Manager,
  // which owns this Delegate.
  AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
  scoped_refptr<TransferStateInternal> state_;

  DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
};

AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
    AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
    GLuint texture_id,
    const AsyncTexImage2DParams& define_params)
    : shared_state_(shared_state),
      state_(new TransferStateInternal(texture_id, define_params)) {}

AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
  TRACE_EVENT0("gpu", " ~AsyncPixelTransferDelegateShareGroup");
  state_->CancelUpload();
}

bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
  return state_->TransferIsInProgress();
}

void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
  if (state_->TransferIsInProgress()) {
    state_->WaitForTransferCompletion();
    DCHECK(!state_->TransferIsInProgress());
  }

  // Fast track the BindTransfer, if applicable.
  for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
           iter = shared_state_->pending_allocations.begin();
       iter != shared_state_->pending_allocations.end();
       ++iter) {
    if (iter->get() != this)
      continue;

    shared_state_->pending_allocations.erase(iter);
    BindTransfer();
    break;
  }
}

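// Queues this delegate on the shared pending_allocations list so that
// BindCompletedAsyncTransfers() can bind the texture and run the client's
// bind callback once the upload completes.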
void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  shared_state_->pending_allocations.push_back(AsWeakPtr());
  state_->ScheduleAsyncTexImage2D(tex_params,
                                  mem_params,
                                  shared_state_->texture_upload_stats,
                                  bind_callback);
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  state_->ScheduleAsyncTexSubImage2D(
      tex_params, mem_params, shared_state_->texture_upload_stats);
}

AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
    // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
    : texture_upload_stats(new AsyncPixelTransferUploadStats) {}

AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}

AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
    gfx::GLContext* context) {
  g_transfer_thread.Pointer()->InitializeOnMainThread(context);
}

AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}

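// Called on the main thread. Binds, in submission order, every pending
// allocation whose upload has completed; the ScopedTextureBinder restores the
// caller's texture binding afterwards.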
void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
  scoped_ptr<gfx::ScopedTextureBinder> texture_binder;

  while (!shared_state_.pending_allocations.empty()) {
    if (!shared_state_.pending_allocations.front().get()) {
      shared_state_.pending_allocations.pop_front();
      continue;
    }
    AsyncPixelTransferDelegateShareGroup* delegate =
        shared_state_.pending_allocations.front().get();
    // Terminate early, as all transfers finish in order, currently.
    if (delegate->TransferIsInProgress())
      break;

    if (!texture_binder)
      texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));

    // Used to set tex info from the gles2 cmd decoder once upload has
    // finished (it'll bind the texture and call a callback).
    delegate->BindTransfer();

    shared_state_.pending_allocations.pop_front();
  }
}

void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
    const AsyncMemoryParams& mem_params,
    AsyncPixelTransferCompletionObserver* observer) {
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  // Post a PerformNotifyCompletion task to the upload thread. This task
  // will run after all async transfers are complete.
  transfer_message_loop_proxy()->PostTask(
      FROM_HERE,
      base::Bind(&PerformNotifyCompletion,
                 mem_params,
                 base::Owned(
                     new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                                mem_params.shared_memory,
                                                mem_params.shm_size)),
                 make_scoped_refptr(observer)));
}

uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
  return shared_state_.texture_upload_stats->GetStats(NULL);
}

base::TimeDelta
AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
  base::TimeDelta total_texture_upload_time;
  shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
  return total_texture_upload_time;
}

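// Transfers are serviced by the dedicated thread as soon as they are
// scheduled, so there is never deferred work for the main thread to process.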
void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
}

bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
  return false;
}

AsyncPixelTransferDelegate*
AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
    gles2::TextureRef* ref,
    const AsyncTexImage2DParams& define_params) {
  return new AsyncPixelTransferDelegateShareGroup(
      &shared_state_, ref->service_id(), define_params);
}

}  // namespace gpu