// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/gpu_scheduler.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif

using ::base::SharedMemory;

namespace gpu {

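// How long (in milliseconds) to wait for an unschedule fence to complete
// before running its deferred task anyway; see PollUnscheduleFences().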
const int64 kUnscheduleFenceTimeOutDelay = 10000;

#if defined(OS_WIN)
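// How long (in milliseconds) the scheduler may remain unscheduled before a
// delayed task forces it back into the scheduled state on pre-Vista Windows;
// see SetScheduled() and RescheduleTimeOut().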
const int64 kRescheduleTimeOutDelay = 1000;
#endif

GpuScheduler::GpuScheduler(CommandBufferServiceBase* command_buffer,
                           AsyncAPIInterface* handler,
                           gles2::GLES2Decoder* decoder)
    : command_buffer_(command_buffer),
      handler_(handler),
      decoder_(decoder),
      unscheduled_count_(0),
      rescheduled_count_(0),
      reschedule_task_factory_(this),
      was_preempted_(false) {}

GpuScheduler::~GpuScheduler() {
}

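// Called when the client advances the put pointer: drains the command buffer,
// processing commands until the parser is empty, an error occurs, the
// scheduler becomes unscheduled, or the channel is preempted.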
void GpuScheduler::PutChanged() {
  TRACE_EVENT1(
     "gpu", "GpuScheduler:PutChanged",
     "decoder", decoder_ ? decoder_->GetLogger()->GetLogPrefix() : "None");

  CommandBuffer::State state = command_buffer_->GetLastState();

  // If there is no parser, exit.
  if (!parser_.get()) {
    DCHECK_EQ(state.get_offset, state.put_offset);
    return;
  }

  parser_->set_put(state.put_offset);
  if (state.error != error::kNoError)
    return;

  // Check that the GPU has passed all fences.
  if (!PollUnscheduleFences())
    return;

  // One of the unschedule fence tasks might have unscheduled us.
  if (!IsScheduled())
    return;

  base::TimeTicks begin_time(base::TimeTicks::HighResNow());
  error::Error error = error::kNoError;
  if (decoder_)
    decoder_->BeginDecoding();
  while (!parser_->IsEmpty()) {
    if (IsPreempted())
      break;

    DCHECK(IsScheduled());
    DCHECK(unschedule_fences_.empty());

    error = parser_->ProcessCommand();

    if (error == error::kDeferCommandUntilLater) {
      DCHECK_GT(unscheduled_count_, 0);
      break;
    }

    // TODO(piman): various classes duplicate various pieces of state, leading
    // to needlessly complex update logic. It should be possible to simply
    // share the state across all of them.
    command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));

    if (error::IsError(error)) {
      LOG(ERROR) << "[" << decoder_ << "] "
                 << "GPU PARSE ERROR: " << error;
      command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
      command_buffer_->SetParseError(error);
      break;
    }

    if (!command_processed_callback_.is_null())
      command_processed_callback_.Run();

    if (unscheduled_count_ > 0)
      break;
  }

  if (decoder_) {
    if (!error::IsError(error) && decoder_->WasContextLost()) {
      command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
      command_buffer_->SetParseError(error::kLostContext);
    }
    decoder_->EndDecoding();
    decoder_->AddProcessingCommandsTime(
        base::TimeTicks::HighResNow() - begin_time);
  }
}

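// Maintains unscheduled_count_: each SetScheduled(false) increments it and
// each SetScheduled(true) decrements it, unless the call is absorbed by
// rescheduled_count_ after a forced reschedule. The scheduling-changed
// callback fires only on transitions between zero and non-zero.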
void GpuScheduler::SetScheduled(bool scheduled) {
  TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
               "new unscheduled_count_",
               unscheduled_count_ + (scheduled? -1 : 1));
  if (scheduled) {
    // If the scheduler was rescheduled after a timeout, ignore the subsequent
    // calls to SetScheduled when they eventually arrive until they are all
    // accounted for.
    if (rescheduled_count_ > 0) {
      --rescheduled_count_;
      return;
    } else {
      --unscheduled_count_;
    }

    DCHECK_GE(unscheduled_count_, 0);

    if (unscheduled_count_ == 0) {
      TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
                             "GpuScheduler", this);
      // When the scheduler transitions from the unscheduled to the scheduled
      // state, cancel the task that would reschedule it after a timeout.
      reschedule_task_factory_.InvalidateWeakPtrs();

      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(true);
    }
  } else {
    ++unscheduled_count_;
    if (unscheduled_count_ == 1) {
      TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
                               "GpuScheduler", this);
#if defined(OS_WIN)
      if (base::win::GetVersion() < base::win::VERSION_VISTA) {
        // When the scheduler transitions from scheduled to unscheduled, post a
        // delayed task that will force it back into a scheduled state after a
        // timeout. This should only be necessary on pre-Vista.
        base::MessageLoop::current()->PostDelayedTask(
            FROM_HERE,
            base::Bind(&GpuScheduler::RescheduleTimeOut,
                       reschedule_task_factory_.GetWeakPtr()),
            base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
      }
#endif
      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(false);
    }
  }
}

bool GpuScheduler::IsScheduled() {
  return unscheduled_count_ == 0;
}

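// There is outstanding work if any unschedule fences are pending, the decoder
// still has pending queries, or there is idle work to perform.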
bool GpuScheduler::HasMoreWork() {
  return !unschedule_fences_.empty() ||
         (decoder_ && decoder_->ProcessPendingQueries()) ||
         HasMoreIdleWork();
}

void GpuScheduler::SetSchedulingChangedCallback(
    const SchedulingChangedCallback& callback) {
  scheduling_changed_callback_ = callback;
}

scoped_refptr<Buffer> GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) {
  return command_buffer_->GetTransferBuffer(shm_id);
}

void GpuScheduler::set_token(int32 token) {
  command_buffer_->SetToken(token);
}

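// Points the command parser at the ring buffer identified by
// transfer_buffer_id, creating the parser on first use, and resets the get
// offset to the start of the buffer. Returns false if the buffer is unknown.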
bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) {
  scoped_refptr<Buffer> ring_buffer =
      command_buffer_->GetTransferBuffer(transfer_buffer_id);
  if (!ring_buffer) {
    return false;
  }

  if (!parser_.get()) {
    parser_.reset(new CommandParser(handler_));
  }

  parser_->SetBuffer(
      ring_buffer->memory(), ring_buffer->size(), 0, ring_buffer->size());

  SetGetOffset(0);
  return true;
}

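// Moves the parser's get pointer and mirrors the new offset into the command
// buffer state; returns false if the parser rejects the offset.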
bool GpuScheduler::SetGetOffset(int32 offset) {
  if (parser_->set_get(offset)) {
    command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
    return true;
  }
  return false;
}

int32 GpuScheduler::GetGetOffset() {
  return parser_->get();
}

void GpuScheduler::SetCommandProcessedCallback(
    const base::Closure& callback) {
  command_processed_callback_ = callback;
}

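// Queues |task| behind a new GL fence and unschedules the scheduler; the task
// runs once the fence completes (or times out) in PollUnscheduleFences().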
void GpuScheduler::DeferToFence(base::Closure task) {
  unschedule_fences_.push(make_linked_ptr(
       new UnscheduleFence(gfx::GLFence::Create(), task)));
  SetScheduled(false);
}

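// Returns true once all unschedule fences have been dealt with. Fences that
// have completed or timed out run their deferred task and reschedule the
// scheduler; if no fence object could be created, fall back to glFinish() and
// run every pending task.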
bool GpuScheduler::PollUnscheduleFences() {
  if (unschedule_fences_.empty())
    return true;

  if (unschedule_fences_.front()->fence.get()) {
    base::Time now = base::Time::Now();
    base::TimeDelta timeout =
        base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay);

    while (!unschedule_fences_.empty()) {
      const UnscheduleFence& fence = *unschedule_fences_.front();
      if (fence.fence->HasCompleted() ||
          now - fence.issue_time > timeout) {
        unschedule_fences_.front()->task.Run();
        unschedule_fences_.pop();
        SetScheduled(true);
      } else {
        return false;
      }
    }
  } else {
    glFinish();

    while (!unschedule_fences_.empty()) {
      unschedule_fences_.front()->task.Run();
      unschedule_fences_.pop();
      SetScheduled(true);
    }
  }

  return true;
}

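// Reports whether the preemption flag is set, emitting a trace counter only
// when the preempted state changes.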
bool GpuScheduler::IsPreempted() {
  if (!preemption_flag_.get())
    return false;

  if (!was_preempted_ && preemption_flag_->IsSet()) {
    TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
    was_preempted_ = true;
  } else if (was_preempted_ && !preemption_flag_->IsSet()) {
    TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
    was_preempted_ = false;
  }

  return preemption_flag_->IsSet();
}

bool GpuScheduler::HasMoreIdleWork() {
  return (decoder_ && decoder_->HasMoreIdleWork());
}

void GpuScheduler::PerformIdleWork() {
  if (!decoder_)
    return;
  decoder_->PerformIdleWork();
}

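// Invoked by the delayed task posted in SetScheduled() on pre-Vista Windows:
// drains unscheduled_count_ so the scheduler can run again, and records the
// drained amount in rescheduled_count_ so the eventual matching
// SetScheduled(true) calls are ignored.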
void GpuScheduler::RescheduleTimeOut() {
  int new_count = unscheduled_count_ + rescheduled_count_;

  rescheduled_count_ = 0;

  while (unscheduled_count_)
    SetScheduled(true);

  rescheduled_count_ = new_count;
}

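// An UnscheduleFence pairs a GL fence with the task to run once it completes,
// remembering when the fence was issued so it can be timed out.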
GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence* fence_,
                                               base::Closure task_)
  : fence(fence_),
    issue_time(base::Time::Now()),
    task(task_) {
}

GpuScheduler::UnscheduleFence::~UnscheduleFence() {
}

}  // namespace gpu