// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
#define GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_

#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "gpu/command_buffer/client/ring_buffer.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/gpu_export.h"

namespace gpu {

class CommandBufferHelper;

// Interface for managing the transfer buffer.
class GPU_EXPORT TransferBufferInterface {
 public:
  TransferBufferInterface() { }
  virtual ~TransferBufferInterface() { }

  virtual bool Initialize(
      unsigned int buffer_size,
      unsigned int result_size,
      unsigned int min_buffer_size,
      unsigned int max_buffer_size,
      unsigned int alignment,
      unsigned int size_to_flush) = 0;

  virtual int GetShmId() = 0;
  virtual void* GetResultBuffer() = 0;
  virtual int GetResultOffset() = 0;

  virtual void Free() = 0;

  virtual bool HaveBuffer() const = 0;

  // Allocates up to size bytes.
  virtual void* AllocUpTo(unsigned int size, unsigned int* size_allocated) = 0;

  // Allocates size bytes.
  // Note: Alloc will fail if it cannot return size bytes.
  virtual void* Alloc(unsigned int size) = 0;

  virtual RingBuffer::Offset GetOffset(void* pointer) const = 0;

  virtual void FreePendingToken(void* p, unsigned int token) = 0;
};
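
// Typical allocation pattern (illustrative sketch, not part of the original
// header). "transfer_buffer" is assumed to be a TransferBufferInterface*, and
// "helper" the CommandBufferHelper it was created with; InsertToken() is
// declared in command_buffer_helper.h, not in this file:
//
//   unsigned int size_allocated = 0;
//   void* data = transfer_buffer->AllocUpTo(request_size, &size_allocated);
//   if (data) {
//     // Copy up to size_allocated bytes into data, then issue commands that
//     // reference it via GetShmId() and GetOffset(data).
//     transfer_buffer->FreePendingToken(data, helper->InsertToken());
//   }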

// Class that manages the transfer buffer.
class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
 public:
  TransferBuffer(CommandBufferHelper* helper);
  virtual ~TransferBuffer();

  // Overridden from TransferBufferInterface.
  virtual bool Initialize(
      unsigned int default_buffer_size,
      unsigned int result_size,
      unsigned int min_buffer_size,
      unsigned int max_buffer_size,
      unsigned int alignment,
      unsigned int size_to_flush) OVERRIDE;
  virtual int GetShmId() OVERRIDE;
  virtual void* GetResultBuffer() OVERRIDE;
  virtual int GetResultOffset() OVERRIDE;
  virtual void Free() OVERRIDE;
  virtual bool HaveBuffer() const OVERRIDE;
  virtual void* AllocUpTo(
      unsigned int size, unsigned int* size_allocated) OVERRIDE;
  virtual void* Alloc(unsigned int size) OVERRIDE;
  virtual RingBuffer::Offset GetOffset(void* pointer) const OVERRIDE;
  virtual void FreePendingToken(void* p, unsigned int token) OVERRIDE;

  // These are for testing.
  unsigned int GetCurrentMaxAllocationWithoutRealloc() const;
  unsigned int GetMaxAllocation() const;

 private:
  // Tries to reallocate the ring buffer if it's not large enough for size.
  void ReallocateRingBuffer(unsigned int size);

  void AllocateRingBuffer(unsigned int size);

  CommandBufferHelper* helper_;
  scoped_ptr<RingBuffer> ring_buffer_;

  // Size reserved for results.
  unsigned int result_size_;

  // Default size. The size we want when starting or re-allocating.
  unsigned int default_buffer_size_;

  // Minimum size we'll consider a successful allocation.
  unsigned int min_buffer_size_;

  // Maximum size we'll let the buffer grow to.
  unsigned int max_buffer_size_;

  // Alignment for allocations.
  unsigned int alignment_;

  // Size at which to do an async flush. 0 = never.
  unsigned int size_to_flush_;

  // Number of bytes since we last flushed.
  unsigned int bytes_since_last_flush_;

  // The current buffer.
  scoped_refptr<gpu::Buffer> buffer_;

  // Id of the buffer. -1 = no buffer.
  int32 buffer_id_;

  // Address of the result area.
  void* result_buffer_;

  // Offset to the result area.
  uint32 result_shm_offset_;

  // False if we failed to allocate min_buffer_size.
  bool usable_;
};
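
// Setup sketch (illustrative only; the numeric values below are arbitrary
// assumptions, not defaults defined by this header):
//
//   TransferBuffer transfer_buffer(helper);
//   bool initialized = transfer_buffer.Initialize(
//       1024 * 1024,       // default_buffer_size
//       256,               // result_size
//       256 * 1024,        // min_buffer_size
//       16 * 1024 * 1024,  // max_buffer_size
//       16,                // alignment
//       256 * 1024);       // size_to_flush; 0 means never flush on size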

// A class that will manage the lifetime of a transfer buffer allocation.
class GPU_EXPORT ScopedTransferBufferPtr {
 public:
  ScopedTransferBufferPtr(
      unsigned int size,
      CommandBufferHelper* helper,
      TransferBufferInterface* transfer_buffer)
      : buffer_(NULL),
        size_(0),
        helper_(helper),
        transfer_buffer_(transfer_buffer) {
    Reset(size);
  }

  ~ScopedTransferBufferPtr() {
    Release();
  }

  bool valid() const {
    return buffer_ != NULL;
  }

  unsigned int size() const {
    return size_;
  }

  int shm_id() const {
    return transfer_buffer_->GetShmId();
  }

  RingBuffer::Offset offset() const {
    return transfer_buffer_->GetOffset(buffer_);
  }

  void* address() const {
    return buffer_;
  }

  void Release();

  void Reset(unsigned int new_size);

 private:
  void* buffer_;
  unsigned int size_;
  CommandBufferHelper* helper_;
  TransferBufferInterface* transfer_buffer_;
  DISALLOW_COPY_AND_ASSIGN(ScopedTransferBufferPtr);
};
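
// RAII usage sketch (illustrative only; "helper" and "transfer_buffer" are
// assumed to exist as in the examples above):
//
//   {
//     ScopedTransferBufferPtr buffer(data_size, helper, transfer_buffer);
//     if (buffer.valid()) {
//       memcpy(buffer.address(), src, buffer.size());
//       // Issue a command that references buffer.shm_id() and
//       // buffer.offset() here.
//     }
//   }  // Released back to the transfer buffer when the scope ends.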

template <typename T>
class ScopedTransferBufferArray : public ScopedTransferBufferPtr {
 public:
  ScopedTransferBufferArray(
      unsigned int num_elements,
      CommandBufferHelper* helper, TransferBufferInterface* transfer_buffer)
      : ScopedTransferBufferPtr(
          num_elements * sizeof(T), helper, transfer_buffer) {
  }

  T* elements() {
    return static_cast<T*>(address());
  }

  unsigned int num_elements() const {
    return size() / sizeof(T);
  }
};
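
// Array variant sketch (illustrative only; note that num_elements() is
// derived from the size actually allocated, which may be smaller than the
// number of elements requested):
//
//   ScopedTransferBufferArray<int32> ints(count, helper, transfer_buffer);
//   if (ints.valid()) {
//     for (unsigned int i = 0; i < ints.num_elements(); ++i)
//       ints.elements()[i] = 0;
//   }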

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_