/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// TODO(vrv): Switch this to an open-sourced version of Arena.

#ifndef TENSORFLOW_CORE_LIB_CORE_ARENA_H_
#define TENSORFLOW_CORE_LIB_CORE_ARENA_H_

#include <assert.h>

#include <vector>

#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {
namespace core {

// This class is "thread-compatible": different threads can access the
// arena at the same time without locking, as long as they use only
// const methods.
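//
// A minimal usage sketch (illustrative only; the 1024-byte block size and the
// variable names below are arbitrary examples, not part of this header):
//
//   Arena arena(1024);                           // 1KB underlying blocks
//   char* buf = arena.Alloc(100);                // byte-aligned allocation
//   char* aligned = arena.AllocAligned(256, 8);  // 8-byte-aligned allocation
//   // ... use buf and aligned; there is no per-allocation free ...
//   arena.Reset();                               // reclaims everything at once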
class Arena {
 public:
  // Allocates a thread-compatible arena with the specified block size.
  explicit Arena(const size_t block_size);
  ~Arena();

  char* Alloc(const size_t size) {
    return reinterpret_cast<char*>(GetMemory(size, 1));
  }

  char* AllocAligned(const size_t size, const size_t alignment) {
    return reinterpret_cast<char*>(GetMemory(size, alignment));
  }

  void Reset();

  // This should be the worst-case alignment for any type. This is
  // good for IA-32, SPARC version 7 (the last one I know), and
  // supposedly Alpha. i386 would be more time-efficient with a
  // default alignment of 8, but ::operator new() uses alignment of 4,
  // and an assertion will fail below after the call to MakeNewBlock()
  // if you try to use a larger alignment.
#ifdef __i386__
  static const int kDefaultAlignment = 4;
#else
  static const int kDefaultAlignment = 8;
#endif

 protected:
  bool SatisfyAlignment(const size_t alignment);
  void MakeNewBlock(const uint32 alignment);
  void* GetMemoryFallback(const size_t size, const int align);
  void* GetMemory(const size_t size, const int align) {
    assert(remaining_ <= block_size_);                  // an invariant
    if (size > 0 && size < remaining_ && align == 1) {  // common case
      void* result = freestart_;
      freestart_ += size;
      remaining_ -= size;
      return result;
    }
    return GetMemoryFallback(size, align);
  }

  size_t remaining_;

 private:
  struct AllocatedBlock {
    char* mem;
    size_t size;
  };

  // Allocate new block of at least block_size, with the specified
  // alignment.
  // The returned AllocatedBlock* is valid until the next call to AllocNewBlock
  // or Reset (i.e. anything that might affect overflow_blocks_).
  AllocatedBlock* AllocNewBlock(const size_t block_size,
                                const uint32 alignment);

  const size_t block_size_;
  char* freestart_;  // beginning of the free space in most recent block
  char* freestart_when_empty_;  // beginning of the free space when we're empty
  // STL vector isn't as efficient as it could be, so we use an array at first
  size_t blocks_alloced_;  // how many of the first_blocks_ have been alloced
  AllocatedBlock first_blocks_[16];  // the length of this array is arbitrary
  // if the first_blocks_ aren't enough, expand into overflow_blocks_.
  std::vector<AllocatedBlock>* overflow_blocks_;

  void FreeBlocks();  // Frees all except first block

  TF_DISALLOW_COPY_AND_ASSIGN(Arena);
};

}  // namespace core
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_LIB_CORE_ARENA_H_