/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/ |
D | Memory.h |
      34  MemoryBlock() : Address(nullptr), AllocatedSize(0) {}  in MemoryBlock()
      36      : Address(addr), AllocatedSize(allocatedSize) {}  in MemoryBlock()
      40  size_t allocatedSize() const { return AllocatedSize; }  in allocatedSize()
      44  size_t AllocatedSize; ///< Size, in bytes of the memory area  variable
|
/external/llvm-project/llvm/include/llvm/Support/ |
D | Memory.h |
      34  MemoryBlock() : Address(nullptr), AllocatedSize(0) {}  in MemoryBlock()
      36      : Address(addr), AllocatedSize(allocatedSize) {}  in MemoryBlock()
      40  size_t allocatedSize() const { return AllocatedSize; }  in allocatedSize()
      44  size_t AllocatedSize; ///< Size, in bytes of the memory area  variable
|
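Both copies above (the SwiftShader-vendored LLVM 10 snapshot and upstream llvm-project) declare the same MemoryBlock: a raw Address plus the AllocatedSize actually reserved, which can exceed the requested size because allocation is page-granular. A minimal usage sketch against the public llvm::sys::Memory API (LLVM 10 names; the build command in the comment is illustrative):

    // Sketch: allocate a mapped block and read back MemoryBlock::allocatedSize().
    // Assumes an LLVM >= 10 build, e.g.
    //   clang++ demo.cpp $(llvm-config --cxxflags --ldflags --libs support)
    #include "llvm/Support/Memory.h"
    #include <cstdio>

    int main() {
      std::error_code EC;
      // Ask for one byte; the implementation rounds up to whole pages, which
      // is exactly why allocatedSize() can be larger than the request.
      llvm::sys::MemoryBlock MB = llvm::sys::Memory::allocateMappedMemory(
          1, /*NearBlock=*/nullptr,
          llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE, EC);
      if (EC)
        return 1;
      std::printf("base=%p allocated=%zu\n", MB.base(), MB.allocatedSize());
      llvm::sys::Memory::releaseMappedMemory(MB);  // resets Address/AllocatedSize
      return 0;
    }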
/external/tensorflow/tensorflow/core/framework/ |
D | tracking_allocator.cc |
      42  size_t allocated_bytes = allocator_->AllocatedSize(ptr);  in AllocateRaw()
      86  allocated_bytes = allocator_->AllocatedSize(ptr);  in DeallocateRaw()
     129  size_t TrackingAllocator::AllocatedSize(const void* ptr) const {  in AllocatedSize()  function in tensorflow::TrackingAllocator
     138  return allocator_->AllocatedSize(ptr);  in AllocatedSize()
|
D | allocator.h |
     254  virtual size_t AllocatedSize(const void* ptr) const {  in AllocatedSize()  function
     277  return AllocatedSize(ptr);  in AllocatedSizeSlow()
     329  size_t AllocatedSize(const void* ptr) const override {  in AllocatedSize()  function
     330  return wrapped_->AllocatedSize(ptr);  in AllocatedSize()
|
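allocator.h gives AllocatedSize a default implementation (meaningful only when the allocator tracks sizes), routes AllocatedSizeSlow through it, and at lines 329-330 forwards the query through a wrapping allocator. A self-contained sketch of that delegation pattern; the Allocator interface below is a stand-in, not TensorFlow's actual header:

    // Stand-in Allocator interface plus a forwarding wrapper in the shape of
    // allocator.h:329-330. Not TensorFlow's real API surface.
    #include <cstddef>

    class Allocator {
     public:
      virtual ~Allocator() = default;
      virtual void* AllocateRaw(std::size_t alignment, std::size_t num_bytes) = 0;
      virtual void DeallocateRaw(void* ptr) = 0;
      // Default: size tracking unsupported; tracking subclasses override both.
      virtual bool TracksAllocationSizes() const { return false; }
      virtual std::size_t AllocatedSize(const void* /*ptr*/) const { return 0; }
    };

    // Forward every query to the wrapped allocator so its tracking behavior
    // is preserved transparently through the wrapper.
    class AllocatorWrapper : public Allocator {
     public:
      explicit AllocatorWrapper(Allocator* wrapped) : wrapped_(wrapped) {}
      void* AllocateRaw(std::size_t a, std::size_t n) override {
        return wrapped_->AllocateRaw(a, n);
      }
      void DeallocateRaw(void* p) override { wrapped_->DeallocateRaw(p); }
      bool TracksAllocationSizes() const override {
        return wrapped_->TracksAllocationSizes();
      }
      std::size_t AllocatedSize(const void* p) const override {
        return wrapped_->AllocatedSize(p);  // same shape as allocator.h:330
      }

     private:
      Allocator* wrapped_;
    };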
D | tracking_allocator_test.cc |
      93  EXPECT_LE(4, ta->AllocatedSize(p1));  in TEST()
      99  EXPECT_LE(12, ta->AllocatedSize(p2));  in TEST()
|
D | tracking_allocator.h | 66 size_t AllocatedSize(const void* ptr) const override;
|
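tracking_allocator.cc samples the wrapped allocator's AllocatedSize at AllocateRaw time and again at DeallocateRaw so it can maintain running totals. A hypothetical condensation of that bookkeeping (the names here are invented):

    // Hypothetical condensation of TrackingAllocator's bookkeeping: remember
    // each pointer's allocated size so totals can be adjusted on free.
    #include <cstddef>
    #include <mutex>
    #include <unordered_map>

    class SizeTracker {
     public:
      void OnAllocate(const void* ptr, std::size_t allocated_bytes) {
        std::lock_guard<std::mutex> l(mu_);
        in_use_[ptr] = allocated_bytes;
        total_bytes_ += allocated_bytes;
        if (total_bytes_ > high_watermark_) high_watermark_ = total_bytes_;
      }
      void OnDeallocate(const void* ptr) {
        std::lock_guard<std::mutex> l(mu_);
        auto it = in_use_.find(ptr);
        if (it == in_use_.end()) return;  // untracked pointer
        total_bytes_ -= it->second;
        in_use_.erase(it);
      }

     private:
      std::mutex mu_;
      std::unordered_map<const void*, std::size_t> in_use_;
      std::size_t total_bytes_ = 0, high_watermark_ = 0;
    };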
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Support/Unix/ |
D | Memory.inc |
     146  Result.AllocatedSize = PageSize*NumPages;
     161  if (M.Address == nullptr || M.AllocatedSize == 0)
     164  if (0 != ::munmap(M.Address, M.AllocatedSize))
     168  M.AllocatedSize = 0;
     176  if (M.Address == nullptr || M.AllocatedSize == 0)
     184  uintptr_t End = alignAddr((const uint8_t *)M.Address + M.AllocatedSize, PageSize);
     197  Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
     208  Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
|
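In the Unix implementation, AllocatedSize is set to PageSize*NumPages (line 146): the request rounded up to whole pages, and that same value is later handed to munmap and the cache-invalidation calls. The rounding itself reduces to:

    // The page rounding behind `Result.AllocatedSize = PageSize*NumPages`.
    #include <cstddef>

    std::size_t RoundUpToPages(std::size_t num_bytes, std::size_t page_size) {
      const std::size_t num_pages = (num_bytes + page_size - 1) / page_size;
      return num_pages * page_size;  // later passed to ::munmap() verbatim
    }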
/external/llvm-project/llvm/lib/Support/Windows/ |
D | Memory.inc |
     148  Result.AllocatedSize = AllocSize;
     158  if (M.Address == 0 || M.AllocatedSize == 0)
     165  M.AllocatedSize = 0;
     172  if (M.Address == 0 || M.AllocatedSize == 0)
     178  if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags))
     182  Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Support/Windows/ |
D | Memory.inc |
     148  Result.AllocatedSize = AllocSize;
     158  if (M.Address == 0 || M.AllocatedSize == 0)
     165  M.AllocatedSize = 0;
     172  if (M.Address == 0 || M.AllocatedSize == 0)
     178  if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags))
     182  Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
|
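The Windows implementation (identical in both trees) protects the whole block at once: VirtualProtect over [Address, Address + AllocatedSize), followed by an instruction-cache flush via Memory::InvalidateInstructionCache. A Win32-only sketch of that sequence, with error handling reduced to a bool:

    // Win32 sketch of the protect-then-flush sequence at Memory.inc:178-182.
    #include <cstddef>
    #ifdef _WIN32
    #include <windows.h>

    bool MakeExecutable(void* address, SIZE_T allocated_size) {
      DWORD old_flags = 0;
      if (!VirtualProtect(address, allocated_size, PAGE_EXECUTE_READ, &old_flags))
        return false;
      // Needed on architectures with incoherent instruction caches; LLVM's
      // InvalidateInstructionCache performs the equivalent flush on Windows.
      FlushInstructionCache(GetCurrentProcess(), address, allocated_size);
      return true;
    }
    #endif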
/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
D | gpu_debug_allocator.cc |
     126  size_t GPUDebugAllocator::AllocatedSize(const void* ptr) const {  in AllocatedSize()  function in tensorflow::GPUDebugAllocator
     127  return base_allocator_->AllocatedSize(static_cast<const char*>(ptr) -  in AllocatedSize()
     209  size_t GPUNanResetAllocator::AllocatedSize(const void* ptr) const {  in AllocatedSize()  function in tensorflow::GPUNanResetAllocator
     210  return base_allocator_->AllocatedSize(ptr);  in AllocatedSize()
|
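The subtraction at gpu_debug_allocator.cc:127 exists because GPUDebugAllocator hands the client a pointer just past a front guard band, so the size query must first step back to the pointer the base allocator actually knows about. The offset arithmetic, with a made-up guard size standing in for TensorFlow's real constant:

    // Hypothetical guard-band arithmetic; kGuardBytes is an assumed value,
    // not TensorFlow's actual mask size.
    #include <cstddef>

    constexpr std::size_t kGuardBytes = 64;

    const void* BasePointer(const void* client_ptr) {
      // Base allocator owns [base, base + kGuardBytes + n + kGuardBytes);
      // the client sees base + kGuardBytes, so step back before querying.
      return static_cast<const char*>(client_ptr) - kGuardBytes;
    }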
D | gpu_debug_allocator.h |
      44  size_t AllocatedSize(const void* ptr) const override;
      73  size_t AllocatedSize(const void* ptr) const override;
|
D | gpu_debug_allocator_test.cc | 251 EXPECT_EQ(256, a.AllocatedSize(t1)); in TEST()
|
D | gpu_bfc_allocator_test.cc |
     242  EXPECT_EQ(256, a.AllocatedSize(t1));  in TEST_P()
     452  initial_ptrs_allocated_sizes.push_back(a.AllocatedSize(raw));  in TestBinDebugInfo()
|
/external/llvm-project/llvm/lib/Support/Unix/ |
D | Memory.inc |
     147  Result.AllocatedSize = PageSize*NumPages;
     162  if (M.Address == nullptr || M.AllocatedSize == 0)
     165  if (0 != ::munmap(M.Address, M.AllocatedSize))
     169  M.AllocatedSize = 0;
     177  if (M.Address == nullptr || M.AllocatedSize == 0)
     185  uintptr_t End = alignAddr((const uint8_t *)M.Address + M.AllocatedSize, PageSize);
     198  Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
     209  Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | process_state.h |
     144  size_t AllocatedSize(const void* p) const override {  in AllocatedSize()  function
     145  return a_->AllocatedSize(p);  in AllocatedSize()
|
D | scoped_allocator.h | 111 size_t AllocatedSize(const void* ptr) const override { return 0; } in AllocatedSize() function
|
D | bfc_allocator.h | 72 size_t AllocatedSize(const void* ptr) const override;
|
/external/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ |
D | OrcRPCTargetProcessControl.h |
      37  uint64_t AllocatedSize = 0;  member
      84  TA.Address, TA.AllocatedSize});  in finalizeAsync()
     136  KV.second.Address, KV.second.AllocatedSize});  in deallocate()
     198  E.Address, E.AllocatedSize};  in allocate()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_rematerialization.cc |
     589  int64 AllocatedSize(Item* item) const {  in AllocatedSize()  function in xla::__anon95ba67740111::MemoryUsageTracker
     592  size += AllocatedSize(buffer_id);  in AllocatedSize()
     684  int64 AllocatedSize(BufferId buffer_id) const {  in AllocatedSize()  function in xla::__anon95ba67740111::MemoryUsageTracker
     873  memory_usage_ += AllocatedSize(buffer_id);  in BeginInstruction()
     900  memory_usage_ -= AllocatedSize(buffer_id);  in EndInstruction()
     912  memory_usage_ -= AllocatedSize(buffer_id);  in EndInstruction()
     986  memory_reduced += AllocatedSize(buffer_id);  in MemoryReducedIfRematerialized()
    1001  memory_reduced -= AllocatedSize(buffer_id);  in MemoryReducedIfRematerialized()
    1086  memory_usage_ += AllocatedSize(buffer.id);  in AddRematerializedInstruction()
    1131  memory_usage_ -= AllocatedSize(old_buffer.id);  in AddRematerializedInstruction()
    [all …]
|
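In hlo_rematerialization.cc, MemoryUsageTracker adds a buffer's AllocatedSize to memory_usage_ when the defining instruction begins and subtracts it once the buffer's last use retires; MemoryReducedIfRematerialized runs the same sums hypothetically. A compressed, hypothetical model of that accounting:

    // Hypothetical model of MemoryUsageTracker's accounting: add a buffer's
    // size when it is defined, subtract when its last use retires.
    #include <cstdint>
    #include <unordered_map>

    class UsageTracker {
     public:
      void Define(int buffer_id, int64_t size, int remaining_uses) {
        buffers_[buffer_id] = Buffer{size, remaining_uses};
        memory_usage_ += size;      // BeginInstruction-style increment
      }
      void Use(int buffer_id) {
        Buffer& b = buffers_.at(buffer_id);
        if (--b.remaining_uses == 0)
          memory_usage_ -= b.size;  // EndInstruction-style decrement
      }
      int64_t memory_usage() const { return memory_usage_; }

     private:
      struct Buffer { int64_t size; int remaining_uses; };
      std::unordered_map<int, Buffer> buffers_;
      int64_t memory_usage_ = 0;
    };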
/external/llvm/lib/Analysis/ |
D | InlineCost.cpp |
     105  uint64_t AllocatedSize;  member in __anoned94fc440111::CallAnalyzer
     214  AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),  in CallAnalyzer()
     344  AllocatedSize = SaturatingMultiplyAdd(  in visitAlloca()
     345  AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty), AllocatedSize);  in visitAlloca()
     354  AllocatedSize = SaturatingAdd(DL.getTypeAllocSize(Ty), AllocatedSize);  in visitAlloca()
    1135  AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)  in analyzeBlock()
|
/external/angle/third_party/abseil-cpp/absl/strings/internal/ |
D | cord_rep_flat.h | 128 size_t AllocatedSize() const { return TagToAllocatedSize(tag); } in AllocatedSize() function
|
D | cordz_info.cc | 172 size_t size = rep.rep->flat()->AllocatedSize(); in CountLinearReps()
|
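cord_rep_flat.h computes AllocatedSize purely from the node's one-byte tag, so flat nodes never store their capacity; cordz_info.cc then sums those sizes when profiling. A hypothetical tag encoding in that spirit (absl's real rounding rules are more involved):

    // Hypothetical tag<->size scheme; absl's actual constants and rounding
    // differ, this only illustrates deriving a size from a one-byte tag.
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t kQuantum = 8;  // assumed allocation granularity

    constexpr uint8_t AllocatedSizeToTag(std::size_t size) {
      return static_cast<uint8_t>(size / kQuantum);  // sizes are multiples of 8
    }
    constexpr std::size_t TagToAllocatedSize(uint8_t tag) {
      return static_cast<std::size_t>(tag) * kQuantum;
    }

    static_assert(TagToAllocatedSize(AllocatedSizeToTag(64)) == 64, "round trip");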
/external/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ |
D | OrcRPCTPCServer.h |
      80  uint64_t AllocatedSize = 0;  member
     209  E.AllocatedSize);  in serialize()
     214  E.AllocatedSize);  in deserialize()
|
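Both ORC RPC files carry AllocatedSize next to Address as a plain uint64_t so a target-process memory region can be described over the wire; OrcRPCTPCServer.h serializes and deserializes exactly those two fields. A stand-in record and byte-level round-trip (not the real ORC SerializationTraits machinery):

    // Stand-in for the {Address, AllocatedSize} wire record; the real code
    // goes through ORC's RPC serialization, and a real protocol would also
    // pin endianness rather than using host byte order as done here.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct TargetAllocation {
      uint64_t Address = 0;
      uint64_t AllocatedSize = 0;
    };

    void Serialize(const TargetAllocation& ta, std::vector<uint8_t>& out) {
      const uint64_t fields[2] = {ta.Address, ta.AllocatedSize};
      const auto* p = reinterpret_cast<const uint8_t*>(fields);
      out.insert(out.end(), p, p + sizeof(fields));  // fixed 16-byte record
    }

    bool Deserialize(const std::vector<uint8_t>& in, TargetAllocation* ta) {
      if (in.size() < 2 * sizeof(uint64_t)) return false;
      std::memcpy(&ta->Address, in.data(), sizeof(uint64_t));
      std::memcpy(&ta->AllocatedSize, in.data() + sizeof(uint64_t),
                  sizeof(uint64_t));
      return true;
    }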
/external/llvm-project/llvm/lib/Analysis/ |
D | InlineCost.cpp |
     281  uint64_t AllocatedSize = 0;  member in __anonae1cb0f20111::CallAnalyzer
     869  AllocatedSize = SaturatingMultiplyAdd(  in visitAlloca()
     871  AllocatedSize);  in visitAlloca()
     872  if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline) {  in visitAlloca()
     883  AllocatedSize =  in visitAlloca()
     884  SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);  in visitAlloca()
    1970  AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {  in analyzeBlock()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/ |
D | InlineCost.cpp |
     221  uint64_t AllocatedSize = 0;  member in __anon18c107e60111::CallAnalyzer
     724  AllocatedSize = SaturatingMultiplyAdd(  in visitAlloca()
     726  AllocatedSize);  in visitAlloca()
     734  AllocatedSize =  in visitAlloca()
     735  SaturatingAdd(DL.getTypeAllocSize(Ty).getFixedSize(), AllocatedSize);  in visitAlloca()
    1800  AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {  in analyzeBlock()
|
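All three InlineCost.cpp copies grow CallAnalyzer::AllocatedSize the same way: a saturating multiply-add for allocas whose element count is known, a saturating add for plain ones, then a bail-out once the total crosses InlineConstants::TotalAllocaSizeRecursiveCaller. The accumulation step, using LLVM's real MathExtras helpers:

    // The alloca-size accounting shared by the three InlineCost.cpp copies.
    // SaturatingMultiplyAdd/SaturatingAdd are real helpers from
    // llvm/Support/MathExtras.h.
    #include "llvm/Support/MathExtras.h"
    #include <cstdint>

    uint64_t AccumulateAllocaBytes(uint64_t AllocatedSize, uint64_t NumElements,
                                   uint64_t ElementSize) {
      // Saturates instead of wrapping, so a huge alloca cannot overflow the
      // running total back below the inlining threshold.
      return llvm::SaturatingMultiplyAdd(NumElements, ElementSize, AllocatedSize);
    }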