// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_

#include "base/memory/scoped_vector.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/types.h"
#include "gpu/gpu_export.h"

namespace gpu {

class CommandBufferHelper;

// Manages a shared memory segment.
class GPU_EXPORT MemoryChunk {
 public:
  MemoryChunk(int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper);

  // Gets the size of the largest free block that is available without waiting.
  unsigned int GetLargestFreeSizeWithoutWaiting() {
    return allocator_.GetLargestFreeSize();
  }

  // Gets the size of the largest free block that can be allocated if the
  // caller can wait.
  unsigned int GetLargestFreeSizeWithWaiting() {
    return allocator_.GetLargestFreeOrPendingSize();
  }

  // Gets the size of the chunk.
  unsigned int GetSize() const {
    return static_cast<unsigned int>(shm_.size);
  }

  // The shared memory id for this chunk.
  int32 shm_id() const {
    return shm_id_;
  }

  // Allocates a block of memory. If the buffer is out of directly available
  // memory, this function may wait until memory that was freed "pending a
  // token" can be re-used.
  //
  // Parameters:
  //   size: the size of the memory block to allocate.
  //
  // Returns:
  //   the pointer to the allocated memory block, or NULL if out of
  //   memory.
  void* Alloc(unsigned int size) {
    return allocator_.Alloc(size);
  }

  // Gets the offset to a memory block given the base memory and the address.
  // It translates NULL to FencedAllocator::kInvalidOffset.
  unsigned int GetOffset(void* pointer) {
    return allocator_.GetOffset(pointer);
  }

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer) {
    allocator_.Free(pointer);
  }

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, unsigned int token) {
    allocator_.FreePendingToken(pointer, token);
  }

  // Frees any blocks whose tokens have passed.
  void FreeUnused() {
    allocator_.FreeUnused();
  }

  // Returns true if pointer is in the range of this block.
  bool IsInChunk(void* pointer) const {
    return pointer >= shm_.ptr &&
           pointer < reinterpret_cast<const int8*>(shm_.ptr) + shm_.size;
  }

  // Returns true if any memory in this chunk is in use.
  bool InUse() {
    return allocator_.InUse();
  }

  size_t bytes_in_use() const {
    return allocator_.bytes_in_use();
  }

 private:
  int32 shm_id_;
  gpu::Buffer shm_;
  FencedAllocatorWrapper allocator_;

  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};
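
// A minimal usage sketch, not a definitive example: it assumes |shm_id|
// identifies an existing shared memory segment wrapped by |buffer| (a
// gpu::Buffer), |helper| is a valid CommandBufferHelper*, and
// CommandBufferHelper::InsertToken() supplies the token guarding the
// pending free.
//
//   MemoryChunk chunk(shm_id, buffer, helper);
//   void* ptr = chunk.Alloc(256);
//   if (ptr) {
//     unsigned int offset = chunk.GetOffset(ptr);
//     // Issue commands that reference (chunk.shm_id(), offset), then free
//     // the block pending a token inserted after those commands.
//     chunk.FreePendingToken(ptr, helper->InsertToken());
//   }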

// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager {
 public:
  enum MemoryLimit {
    kNoLimit = 0,
  };

  // |unused_memory_reclaim_limit|: When exceeded, pending memory is reclaimed
  // before more memory is allocated.
  MappedMemoryManager(CommandBufferHelper* helper,
                      size_t unused_memory_reclaim_limit);

  ~MappedMemoryManager();

  unsigned int chunk_size_multiple() const {
    return chunk_size_multiple_;
  }

  void set_chunk_size_multiple(unsigned int multiple) {
    chunk_size_multiple_ = multiple;
  }

  // Allocates a block of memory.
  // Parameters:
  //   size: size of memory to allocate.
  //   shm_id: pointer to variable to receive the shared memory id.
  //   shm_offset: pointer to variable to receive the shared memory offset.
  // Returns:
  //   pointer to allocated block of memory. NULL if failure.
  void* Alloc(
      unsigned int size, int32* shm_id, unsigned int* shm_offset);

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer);

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, int32 token);

  // Frees any shared memory that is not in use.
  void FreeUnused();

  // Used for testing
  size_t num_chunks() const {
    return chunks_.size();
  }

  // Used for testing
  size_t allocated_memory() const {
    return allocated_memory_;
  }

 private:
  typedef ScopedVector<MemoryChunk> MemoryChunkVector;

  // Size a chunk is rounded up to.
  unsigned int chunk_size_multiple_;
  CommandBufferHelper* helper_;
  MemoryChunkVector chunks_;
  size_t allocated_memory_;
  size_t max_free_bytes_;

  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};
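
// A minimal usage sketch, under the assumption that |helper| is a valid
// CommandBufferHelper* and that CommandBufferHelper::InsertToken() supplies
// the token guarding the pending free:
//
//   MappedMemoryManager manager(helper, MappedMemoryManager::kNoLimit);
//   int32 shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* ptr = manager.Alloc(1024, &shm_id, &shm_offset);
//   if (ptr) {
//     // Fill |ptr| and issue commands that reference (shm_id, shm_offset).
//     manager.FreePendingToken(ptr, helper->InsertToken());
//   }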

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_