/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_PLATFORM_NUMA_H_
#define TENSORFLOW_CORE_PLATFORM_NUMA_H_

#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {
namespace port {

// Returns true iff NUMA functions are supported.
bool NUMAEnabled();

// Returns the number of NUMA nodes present with respect to CPU operations.
// Typically this will be the number of sockets where some RAM has greater
// affinity with one socket than another.
int NUMANumNodes();

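// Example (illustrative sketch, not part of this API): callers typically
// check NUMAEnabled() before branching on the node count, e.g. from code in
// namespace tensorflow:
//
//   if (port::NUMAEnabled()) {
//     const int num_nodes = port::NUMANumNodes();
//     // e.g. shard work or per-node allocators across num_nodes nodes.
//   }
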
static const int kNUMANoAffinity = -1;

// If possible, sets affinity of the current thread to the specified NUMA node.
// If node == kNUMANoAffinity, removes affinity to any particular node.
void NUMASetThreadNodeAffinity(int node);

// Returns NUMA node affinity of the current thread, kNUMANoAffinity if none.
int NUMAGetThreadNodeAffinity();

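// Example (illustrative sketch): pin the current thread to a node, do some
// node-local work, then clear the affinity. The node index 0 is an arbitrary
// placeholder; setting affinity is best effort and may have no effect.
//
//   port::NUMASetThreadNodeAffinity(0);
//   if (port::NUMAGetThreadNodeAffinity() == 0) {
//     // ... node-local work ...
//   }
//   port::NUMASetThreadNodeAffinity(port::kNUMANoAffinity);
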
// Like AlignedMalloc, but allocates memory with affinity to the specified NUMA
// node.
//
// Notes:
//  1. node must be >= 0 and < NUMANumNodes().
//  2. minimum_alignment must be a factor of the system page size; the memory
//     returned will be page-aligned.
//  3. This function is likely significantly slower than AlignedMalloc
//     and should not be used for lots of small allocations.  It makes more
//     sense as a backing allocator for BFCAllocator, PoolAllocator, or similar.
void* NUMAMalloc(int node, size_t size, int minimum_alignment);

// Memory allocated by NUMAMalloc must be freed via NUMAFree.
void NUMAFree(void* ptr, size_t size);

// Returns NUMA node affinity of a memory address, kNUMANoAffinity if none.
int NUMAGetMemAffinity(const void* ptr);

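// Example (illustrative sketch): one large, page-aligned allocation bound to a
// node, of the kind that might back a sub-allocator. The node index, size, and
// alignment are placeholders.
//
//   const size_t kBytes = size_t{64} << 20;
//   void* buf = port::NUMAMalloc(/*node=*/0, kBytes, /*minimum_alignment=*/64);
//   if (buf != nullptr) {
//     // NUMAGetMemAffinity(buf) should report node 0 if placement succeeded.
//     port::NUMAFree(buf, kBytes);
//   }
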
}  // namespace port
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_PLATFORM_NUMA_H_