/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/tf2xla/cpu_function_runtime.h"

#include <cstdlib>  // malloc, free, posix_memalign

#include "tensorflow/core/platform/dynamic_annotations.h"

namespace tensorflow {
namespace {
// Inline memory allocation routines here, because depending on '//base' brings
// in libraries which use C++ streams, which adds considerable code size on
// Android.
void* aligned_malloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__) || defined(OS_ANDROID) || defined(OS_CYGWIN)
  return memalign(minimum_alignment, size);
#elif defined(_WIN32)
  return _aligned_malloc(size, minimum_alignment);
#else  // !__ANDROID__ && !OS_ANDROID && !OS_CYGWIN && !_WIN32
  void* ptr = nullptr;
  // posix_memalign requires that the requested alignment be at least
  // sizeof(void*). If it is smaller, fall back on malloc, which should return
  // memory aligned to at least the size of a pointer.
  const int required_alignment = sizeof(void*);
  if (minimum_alignment < required_alignment) return malloc(size);
  if (posix_memalign(&ptr, minimum_alignment, size) != 0)
    return nullptr;
  else
    return ptr;
#endif
}

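// On Windows, memory obtained from _aligned_malloc must be released with
// _aligned_free rather than free, which is why deallocation is routed through
// this helper as well.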
void aligned_free(void* aligned_memory) {
#if defined(_WIN32)
  _aligned_free(aligned_memory);
#else
  free(aligned_memory);
#endif
}

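// Rounds n up to the nearest multiple of align. For illustration:
// align_to(1, 64) == 64, align_to(64, 64) == 64, and align_to(65, 64) == 128.
// align_to(0, 64) also yields 0, relying on well-defined unsigned wraparound
// of (n - 1).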
size_t align_to(size_t n, size_t align) {
  return (((n - 1) / align) + 1) * align;
}
}  // namespace

namespace cpu_function_runtime {
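// Illustration (assuming, for the arithmetic only, that kAlign is 64): two
// temp buffers of 100 and 16 bytes contribute align_to(100, 64) +
// align_to(16, 64) = 128 + 64 = 192 bytes to the total; entry parameters are
// counted only when allocate_entry_params is true.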
size_t AlignedBufferBytes(const BufferInfo* buffer_infos, size_t n,
                          bool allocate_entry_params) {
  size_t total = 0;
  for (size_t i = 0; i < n; ++i) {
    bool should_allocate =
        buffer_infos[i].is_temp_buffer() ||
        (buffer_infos[i].is_entry_parameter() && allocate_entry_params);

    if (should_allocate) {
      total += align_to(buffer_infos[i].size(), kAlign);
    }
  }
  return total;
}
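// A minimal usage sketch (hypothetical caller; `infos`, `n`, and `bufs` are
// illustrative names, not part of this API):
//
//   std::vector<void*> bufs(n);
//   void* block = MallocContiguousBuffers(infos, n,
//                                         /*allocate_entry_params=*/true,
//                                         bufs.data(),
//                                         /*annotate_initialized=*/true);
//   ... run the compiled function against bufs ...
//   FreeContiguous(block);  // bufs[i] are slices of block; never free them
//                           // individually.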
void* MallocContiguousBuffers(const BufferInfo* buffer_infos, size_t n,
                              bool allocate_entry_params, void** bufs,
                              bool annotate_initialized) {
  const size_t total =
      AlignedBufferBytes(buffer_infos, n, allocate_entry_params);
  void* contiguous = nullptr;
  if (total > 0) {
    contiguous = aligned_malloc(total, kAlign);
    if (annotate_initialized) {
      // Since the memory for temp buffers is written to by JITed code, msan has
      // no way of knowing the memory was initialized, so explicitly mark it.
      TF_ANNOTATE_MEMORY_IS_INITIALIZED(contiguous, total);
    }
  }
  uintptr_t pos = reinterpret_cast<uintptr_t>(contiguous);
  for (size_t i = 0; i < n; ++i) {
    bool should_allocate =
        buffer_infos[i].is_temp_buffer() ||
        (buffer_infos[i].is_entry_parameter() && allocate_entry_params);
    if (should_allocate) {
      bufs[i] = reinterpret_cast<void*>(pos);
      pos += align_to(buffer_infos[i].size(), kAlign);
    } else {
      bufs[i] = nullptr;
    }
  }
  return contiguous;
}

void FreeContiguous(void* contiguous) {
  if (contiguous != nullptr) {
    aligned_free(contiguous);
  }
}
}  // namespace cpu_function_runtime
}  // namespace tensorflow