// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

#include <time.h>

#include <algorithm>

#include "base/logging.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/trace_event.h"

namespace gpu {

CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      immediate_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
      commands_issued_(0),
#endif
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      last_flush_time_(0),
      flush_generation_(0) {
}

void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}

bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}

void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // Check if usable & allocated.
  if (!usable() || !HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries.
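  // One slot is always left unused so that put_ == get stays unambiguous
  // (it means "empty", never "full"). For example, in a 100-entry buffer
  // with get == 10 and put_ == 4, entries 4..8 are writable (10 - 4 - 1);
  // with get == 4 and put_ == 10, entries 10..99 are writable, minus one
  // entry when get == 0 (writing entry 99 would wrap put_ onto get).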
  const int32 curr_get = get_offset();
  if (curr_get > put_) {
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
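  // The flush threshold is a fraction of the buffer: a small fraction
  // (1/kAutoFlushSmall, see cmd_buffer_helper.h) when the service has
  // already caught up with the last flush, so new work reaches it quickly,
  // and a larger one (1/kAutoFlushBig) while it is still busy consuming
  // earlier commands.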
  if (flush_automatically_) {
    int32 limit =
        total_entry_count_ /
        ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);

    int32 pending =
        (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}

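// Lazily creates the shared-memory ring buffer and makes it the current get
// buffer. On failure the helper is marked unusable and false is returned, so
// subsequent calls become no-ops.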
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  scoped_refptr<Buffer> buffer =
      command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    ClearUsable();
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
  total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
  // The call to SetGetBuffer(id) above resets the get and put offsets to 0,
  // so there is no need to query them through IPC.
  put_ = 0;
  CalcImmediateEntries(0);
  return true;
}

void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
    CalcImmediateEntries(0);
  }
}

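// Freeing the ring buffer is only legal once the service has consumed every
// command (put_ == get) or the command buffer is already in an error state;
// the CHECK below enforces this.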
void CommandBufferHelper::FreeRingBuffer() {
  CHECK((put_ == get_offset()) ||
        error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}

bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}

CommandBufferHelper::~CommandBufferHelper() {
  FreeResources();
}

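// Blocks until the service's get offset falls inside [start, end]. Returns
// false if the helper is unusable or the wait ended with an error.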
bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
  if (!usable()) {
    return false;
  }
  command_buffer_->WaitForGetOffsetInRange(start, end);
  return command_buffer_->GetLastError() == gpu::error::kNoError;
}

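// Publishes put_ to the service. flush_generation_ is incremented so that
// callers polling flush_generation() (see cmd_buffer_helper.h) can detect
// that a flush has happened since they last checked.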
void CommandBufferHelper::Flush() {
  // Wrap put_ before flush.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable() && last_put_sent_ != put_) {
    last_flush_time_ = clock();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
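// Flushes if more than kPeriodicFlushDelay seconds (see cmd_buffer_helper.h)
// have elapsed since the last flush, so commands do not sit in the buffer
// indefinitely when traffic is light.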
void CommandBufferHelper::PeriodicFlushCheck() {
  clock_t current_time = clock();
  if (current_time - last_flush_time_ > kPeriodicFlushDelay * CLOCKS_PER_SEC)
    Flush();
}
#endif

// Calls Flush() and then waits until the buffer is empty. Returns early if
// an error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  DCHECK(HaveRingBuffer());
  Flush();
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(get_offset(), put_);

  CalcImmediateEntries(0);

  return true;
}

// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare.
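// For example, after 2^31 - 1 insertions token_ reaches 0x7FFFFFFF and the
// next insertion wraps it to 0; Finish() then drains the buffer so that every
// previously issued token has been read before the numbering restarts,
// keeping the last_token_read() >= token comparison valid.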
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // We wrapped.
      Finish();
      DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}

// Waits until the current token value is greater than or equal to the value
// passed as argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if the corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_) return;  // We wrapped.
  if (last_token_read() >= token)
    return;
  Flush();
  command_buffer_->WaitForTokenInRange(token, token_);
}

// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, actually that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32 curr_get = get_offset();
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      Flush();
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = get_offset();
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }
    // Insert Noops to fill out the buffer.
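    // A single Noop's header can encode at most CommandHeader::kMaxSize
    // entries, so the padding is emitted in chunks.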
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush().
    Flush();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full. Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
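      // The start of the range may exceed the end; the wait treats such a
      // range as wrapping past the end of the buffer, so this blocks until
      // get has moved out of (put_, put_ + count].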
      if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}

}  // namespace gpu