// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

#include "base/logging.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/trace_event.h"

namespace gpu {

// Check whether an automatic flush is due once every this many commands.
const int kCommandsPerFlushCheck = 100;

#if !defined(OS_ANDROID)
// Target interval between automatic flushes: 1/300 of a second (~3.3 ms).
const double kFlushDelay = 1.0 / (5.0 * 60.0);
#endif
20
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
      commands_issued_(0),
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      last_flush_time_(0) {
}
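
// Example lifecycle (an illustrative sketch, not part of the original file;
// command_buffer and kRingBufferSize are assumed to be provided by the
// caller):
//
//   CommandBufferHelper helper(command_buffer);
//   if (!helper.Initialize(kRingBufferSize))  // Allocates the ring buffer.
//     return false;
//   // ... issue commands via GetSpace() / GetCmdSpace<T>() ...
//   helper.Finish();  // Blocks until the reader has consumed everything.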
36
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
}
40
bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}
47
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  Buffer buffer = command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    ClearUsable();
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);

  // TODO(gman): Do we really need to call GetState here? We know get & put = 0
  // Also do we need to check state.num_entries?
  CommandBuffer::State state = command_buffer_->GetState();
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_.ptr);
  int32 num_ring_buffer_entries =
      ring_buffer_size_ / sizeof(CommandBufferEntry);
  if (num_ring_buffer_entries > state.num_entries) {
    ClearUsable();
    return false;
  }

  total_entry_count_ = num_ring_buffer_entries;
  put_ = state.put_offset;
  return true;
}
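
// For illustration (the entry size is an assumption, not asserted here): if
// CommandBufferEntry is 4 bytes and ring_buffer_size_ is 1 MB, the ring
// holds 1048576 / 4 = 262144 entries.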
83
void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
  }
}
90
void CommandBufferHelper::FreeRingBuffer() {
  CHECK((put_ == get_offset()) ||
        error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}
96
bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}
101
CommandBufferHelper::~CommandBufferHelper() {
  FreeResources();
}
105
bool CommandBufferHelper::FlushSync() {
  if (!usable()) {
    return false;
  }
  last_flush_time_ = clock();
  last_put_sent_ = put_;
  CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset());
  return state.error == error::kNoError;
}
115
void CommandBufferHelper::Flush() {
  if (usable() && last_put_sent_ != put_) {
    last_flush_time_ = clock();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
  }
}
123
// Calls Flush() and then waits until the buffer is empty. Breaks out early if
// an error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  DCHECK(HaveRingBuffer());
  do {
    // Do not loop forever if the flush fails, meaning the command buffer
    // reader has shut down.
    if (!FlushSync())
      return false;
  } while (put_ != get_offset());

  return true;
}
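
// Note on Finish() above: it is roughly the command-buffer analogue of
// glFinish(). When it returns true, every entry written so far has been
// consumed by the reader (put_ == get_offset()).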
145
// Inserts a new token into the command stream. Tokens are handed out in
// increasing order so that none are lost (a token has passed if the current
// token value is higher than that token). Calls Finish() if the token value
// wraps, which will be rare.
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment the token as a 31-bit integer. Negative values are used to
  // signal an error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // We wrapped; drain the buffer so old tokens cannot be confused with
      // new ones.
      Finish();
      DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}
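
// Worked example of the wrap (numbers follow directly from the code above):
// token_ counts 1, 2, ..., 0x7FFFFFFF, and then
// (0x7FFFFFFF + 1) & 0x7FFFFFFF == 0. At that point the Finish() call
// guarantees the reader has consumed every earlier SetToken command, so
// last_token_read() restarts from 0 in lock-step with the writer.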
171
// Waits until the reader's current token value, i.e. last_token_read(), is
// greater than or equal to the token passed in as an argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if the corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_) return;  // We wrapped.
  while (last_token_read() < token) {
    if (get_offset() == put_) {
      LOG(FATAL) << "Empty command buffer while waiting on a token.";
      return;
    }
    // Do not loop forever if the flush fails, meaning the command buffer
    // reader has shut down.
    if (!FlushSync())
      return;
  }
}
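
// Typical pairing (an illustrative sketch; transfer_buffer is hypothetical):
// a client tags the last command that reads a shared buffer with a token,
// then waits on that token before reusing the memory:
//
//   // ... issue commands that read from transfer_buffer ...
//   int32 token = helper.InsertToken();
//   // ... later, before overwriting transfer_buffer:
//   helper.WaitForToken(token);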
193
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not actually be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, more precisely that get is
    // 1 or more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    if (get_offset() > put_ || get_offset() == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      while (get_offset() > put_ || get_offset() == 0) {
        // Do not loop forever if the flush fails, meaning the command buffer
        // reader has shut down.
        if (!FlushSync())
          return;
      }
    }
    // Insert noops to fill out the rest of the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }
  if (AvailableEntries() < count) {
    TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
    while (AvailableEntries() < count) {
      // Do not loop forever if the flush fails, meaning the command buffer
      // reader has shut down.
      if (!FlushSync())
        return;
    }
  }
  // Force a flush if the buffer is getting half full, or even earlier if the
  // reader is known to be idle.
  int32 pending =
      (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
  int32 limit = total_entry_count_ /
      ((get_offset() == last_put_sent_) ? 16 : 2);
  if (pending > limit) {
    Flush();
  } else if (flush_automatically_ &&
             (commands_issued_ % kCommandsPerFlushCheck == 0)) {
#if !defined(OS_ANDROID)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    clock_t current_time = clock();
    if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC)
      Flush();
#endif
  }
}
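
// Worked example of the flush heuristic above (numbers are illustrative):
// with total_entry_count_ == 1024, last_put_sent_ == 100 and put_ == 900,
// pending == (900 + 1024 - 100) % 1024 == 800. If the reader is busy the
// limit is 1024 / 2 == 512, so 800 > 512 triggers a Flush(); if the reader
// is idle (get_offset() == last_put_sent_) the limit drops to
// 1024 / 16 == 64.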
261
CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
  AllocateRingBuffer();
  if (!usable()) {
    return NULL;
  }
  DCHECK(HaveRingBuffer());
  ++commands_issued_;
  WaitForAvailableEntries(entries);
  CommandBufferEntry* space = &entries_[put_];
  put_ += entries;
  DCHECK_LE(put_, total_entry_count_);
  if (put_ == total_entry_count_) {
    put_ = 0;
  }
  return space;
}
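
// Callers usually reach GetSpace() through the typed GetCmdSpace<T>() helper,
// as InsertToken() does above. A minimal sketch (cmd::SetToken stands in for
// any fixed-size command):
//
//   cmd::SetToken* cmd = helper.GetCmdSpace<cmd::SetToken>();
//   if (cmd)  // NULL means the helper became unusable.
//     cmd->Init(token);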
278
}  // namespace gpu