• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // This file contains the implementation of the command buffer helper class.
6 
7 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
8 
9 #include "base/logging.h"
10 #include "base/time/time.h"
11 #include "gpu/command_buffer/common/command_buffer.h"
12 #include "gpu/command_buffer/common/trace_event.h"
13 
14 namespace gpu {
15 
CommandBufferHelper(CommandBuffer * command_buffer)16 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
17     : command_buffer_(command_buffer),
18       ring_buffer_id_(-1),
19       ring_buffer_size_(0),
20       entries_(NULL),
21       total_entry_count_(0),
22       immediate_entry_count_(0),
23       token_(0),
24       put_(0),
25       last_put_sent_(0),
26 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
27       commands_issued_(0),
28 #endif
29       usable_(true),
30       context_lost_(false),
31       flush_automatically_(true),
32       flush_generation_(0) {
33 }
34 
SetAutomaticFlushes(bool enabled)35 void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
36   flush_automatically_ = enabled;
37   CalcImmediateEntries(0);
38 }
39 
IsContextLost()40 bool CommandBufferHelper::IsContextLost() {
41   if (!context_lost_) {
42     context_lost_ = error::IsError(command_buffer()->GetLastError());
43   }
44   return context_lost_;
45 }
46 
CalcImmediateEntries(int waiting_count)47 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
48   DCHECK_GE(waiting_count, 0);
49 
50   // Check if usable & allocated.
51   if (!usable() || !HaveRingBuffer()) {
52     immediate_entry_count_ = 0;
53     return;
54   }
55 
56   // Get maximum safe contiguous entries.
57   const int32 curr_get = get_offset();
58   if (curr_get > put_) {
59     immediate_entry_count_ = curr_get - put_ - 1;
60   } else {
61     immediate_entry_count_ =
62         total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
63   }
64 
65   // Limit entry count to force early flushing.
66   if (flush_automatically_) {
67     int32 limit =
68         total_entry_count_ /
69         ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);
70 
71     int32 pending =
72         (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
73 
74     if (pending > 0 && pending >= limit) {
75       // Time to force flush.
76       immediate_entry_count_ = 0;
77     } else {
78       // Limit remaining entries, but not lower than waiting_count entries to
79       // prevent deadlock when command size is greater than the flush limit.
80       limit -= pending;
81       limit = limit < waiting_count ? waiting_count : limit;
82       immediate_entry_count_ =
83           immediate_entry_count_ > limit ? limit : immediate_entry_count_;
84     }
85   }
86 }
87 
AllocateRingBuffer()88 bool CommandBufferHelper::AllocateRingBuffer() {
89   if (!usable()) {
90     return false;
91   }
92 
93   if (HaveRingBuffer()) {
94     return true;
95   }
96 
97   int32 id = -1;
98   scoped_refptr<Buffer> buffer =
99       command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
100   if (id < 0) {
101     ClearUsable();
102     return false;
103   }
104 
105   ring_buffer_ = buffer;
106   ring_buffer_id_ = id;
107   command_buffer_->SetGetBuffer(id);
108   entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
109   total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
110   // Call to SetGetBuffer(id) above resets get and put offsets to 0.
111   // No need to query it through IPC.
112   put_ = 0;
113   CalcImmediateEntries(0);
114   return true;
115 }
116 
FreeResources()117 void CommandBufferHelper::FreeResources() {
118   if (HaveRingBuffer()) {
119     command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
120     ring_buffer_id_ = -1;
121     CalcImmediateEntries(0);
122   }
123 }
124 
FreeRingBuffer()125 void CommandBufferHelper::FreeRingBuffer() {
126   CHECK((put_ == get_offset()) ||
127       error::IsError(command_buffer_->GetLastState().error));
128   FreeResources();
129 }
130 
Initialize(int32 ring_buffer_size)131 bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
132   ring_buffer_size_ = ring_buffer_size;
133   return AllocateRingBuffer();
134 }
135 
~CommandBufferHelper()136 CommandBufferHelper::~CommandBufferHelper() {
137   FreeResources();
138 }
139 
WaitForGetOffsetInRange(int32 start,int32 end)140 bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
141   if (!usable()) {
142     return false;
143   }
144   command_buffer_->WaitForGetOffsetInRange(start, end);
145   return command_buffer_->GetLastError() == gpu::error::kNoError;
146 }
147 
Flush()148 void CommandBufferHelper::Flush() {
149   // Wrap put_ before flush.
150   if (put_ == total_entry_count_)
151     put_ = 0;
152 
153   if (usable() && last_put_sent_ != put_) {
154     last_flush_time_ = base::TimeTicks::Now();
155     last_put_sent_ = put_;
156     command_buffer_->Flush(put_);
157     ++flush_generation_;
158     CalcImmediateEntries(0);
159   }
160 }
161 
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Flushes if more than kPeriodicFlushDelayInMicroseconds have elapsed since
// the last flush, bounding command latency when writes are sparse.
void CommandBufferHelper::PeriodicFlushCheck() {
  const base::TimeDelta since_last_flush =
      base::TimeTicks::Now() - last_flush_time_;
  if (since_last_flush >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif
171 
172 // Calls Flush() and then waits until the buffer is empty. Break early if the
173 // error is set.
Finish()174 bool CommandBufferHelper::Finish() {
175   TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
176   if (!usable()) {
177     return false;
178   }
179   // If there is no work just exit.
180   if (put_ == get_offset()) {
181     return true;
182   }
183   DCHECK(HaveRingBuffer());
184   Flush();
185   if (!WaitForGetOffsetInRange(put_, put_))
186     return false;
187   DCHECK_EQ(get_offset(), put_);
188 
189   CalcImmediateEntries(0);
190 
191   return true;
192 }
193 
194 // Inserts a new token into the command stream. It uses an increasing value
195 // scheme so that we don't lose tokens (a token has passed if the current token
196 // value is higher than that token). Calls Finish() if the token value wraps,
197 // which will be rare.
InsertToken()198 int32 CommandBufferHelper::InsertToken() {
199   AllocateRingBuffer();
200   if (!usable()) {
201     return token_;
202   }
203   DCHECK(HaveRingBuffer());
204   // Increment token as 31-bit integer. Negative values are used to signal an
205   // error.
206   token_ = (token_ + 1) & 0x7FFFFFFF;
207   cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
208   if (cmd) {
209     cmd->Init(token_);
210     if (token_ == 0) {
211       TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
212       // we wrapped
213       Finish();
214       DCHECK_EQ(token_, last_token_read());
215     }
216   }
217   return token_;
218 }
219 
220 // Waits until the current token value is greater or equal to the value passed
221 // in argument.
WaitForToken(int32 token)222 void CommandBufferHelper::WaitForToken(int32 token) {
223   if (!usable() || !HaveRingBuffer()) {
224     return;
225   }
226   // Return immediately if corresponding InsertToken failed.
227   if (token < 0)
228     return;
229   if (token > token_) return;  // we wrapped
230   if (last_token_read() >= token)
231     return;
232   Flush();
233   command_buffer_->WaitForTokenInRange(token, token_);
234 }
235 
236 // Waits for available entries, basically waiting until get >= put + count + 1.
237 // It actually waits for contiguous entries, so it may need to wrap the buffer
238 // around, adding a noops. Thus this function may change the value of put_. The
239 // function will return early if an error occurs, in which case the available
240 // space may not be available.
WaitForAvailableEntries(int32 count)241 void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
242   AllocateRingBuffer();
243   if (!usable()) {
244     return;
245   }
246   DCHECK(HaveRingBuffer());
247   DCHECK(count < total_entry_count_);
248   if (put_ + count > total_entry_count_) {
249     // There's not enough room between the current put and the end of the
250     // buffer, so we need to wrap. We will add noops all the way to the end,
251     // but we need to make sure get wraps first, actually that get is 1 or
252     // more (since put will wrap to 0 after we add the noops).
253     DCHECK_LE(1, put_);
254     int32 curr_get = get_offset();
255     if (curr_get > put_ || curr_get == 0) {
256       TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
257       Flush();
258       if (!WaitForGetOffsetInRange(1, put_))
259         return;
260       curr_get = get_offset();
261       DCHECK_LE(curr_get, put_);
262       DCHECK_NE(0, curr_get);
263     }
264     // Insert Noops to fill out the buffer.
265     int32 num_entries = total_entry_count_ - put_;
266     while (num_entries > 0) {
267       int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
268       cmd::Noop::Set(&entries_[put_], num_to_skip);
269       put_ += num_to_skip;
270       num_entries -= num_to_skip;
271     }
272     put_ = 0;
273   }
274 
275   // Try to get 'count' entries without flushing.
276   CalcImmediateEntries(count);
277   if (immediate_entry_count_ < count) {
278     // Try again with a shallow Flush().
279     Flush();
280     CalcImmediateEntries(count);
281     if (immediate_entry_count_ < count) {
282       // Buffer is full.  Need to wait for entries.
283       TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
284       if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
285         return;
286       CalcImmediateEntries(count);
287       DCHECK_GE(immediate_entry_count_, count);
288     }
289   }
290 }
291 
292 
293 }  // namespace gpu
294