/external/tensorflow/tensorflow/compiler/xla/service/gpu/

buffer_allocations.h
    52  DeviceMemoryAllocator* memory_allocator);
    62  DeviceMemoryAllocator* memory_allocator() const { return memory_allocator_; }  [in memory_allocator(), function]
    88  DeviceMemoryAllocator* memory_allocator)  [in BufferAllocations(), argument]
    91  memory_allocator_(memory_allocator) {}  [in BufferAllocations()]

buffer_allocations.cc
    43  DeviceMemoryAllocator* memory_allocator) {  [in Build(), argument]
    46  new BufferAllocations(num_buffers, device_ordinal, memory_allocator));  [in Build()]
    72  TF_ASSIGN_OR_RETURN(buffer_address, memory_allocator->Allocate(  [in Build()]

fft_thunk.cc
    33  int device_ordinal, DeviceMemoryAllocator* memory_allocator)  [in FftScratchAllocator(), argument]
    34  : device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}  [in FftScratchAllocator()]
   134  buffer_allocations.memory_allocator());  [in ExecuteOnStream()]

gpu_executable.cc
   254  DeviceMemoryAllocator* memory_allocator = run_options->allocator();  [in ExecuteOnStream(), local]
   281  memory_allocator));  [in ExecuteOnStream()]
   284  !memory_allocator->AllowsAsynchronousDeallocation();  [in ExecuteOnStream()]

fft_thunk.h
    40  DeviceMemoryAllocator* memory_allocator);

cudnn_convolution_algorithm_picker.cc
    37  ScratchAllocator(int device_ordinal, DeviceMemoryAllocator* memory_allocator)  [in ScratchAllocator(), argument]
    38  : device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}  [in ScratchAllocator()]

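The FftScratchAllocator and cudnn ScratchAllocator hits above share one pattern: a thin wrapper that captures a device ordinal plus a non-owned DeviceMemoryAllocator* and forwards every scratch request to it. Below is a minimal C++ sketch of that pattern; the class name, the AllocateBytes() method and the exact Allocate() signature are assumptions inferred from the call sites in this listing, not the real XLA classes.

    // Sketch only: the wrapper shape used by FftScratchAllocator and the
    // convolution-algorithm-picker's ScratchAllocator. Signatures are assumed.
    class ScratchAllocatorSketch {
     public:
      ScratchAllocatorSketch(int device_ordinal,
                             DeviceMemoryAllocator* memory_allocator)
          : device_ordinal_(device_ordinal),
            memory_allocator_(memory_allocator) {}

      DeviceMemoryAllocator* memory_allocator() const { return memory_allocator_; }

      // Forward a scratch-buffer request to the wrapped allocator, mirroring
      // the memory_allocator->Allocate(...) call sites above.
      StatusOr<se::DeviceMemoryBase> AllocateBytes(uint64 size) {
        return memory_allocator_->Allocate(device_ordinal_, size);
      }

     private:
      int device_ordinal_;
      DeviceMemoryAllocator* memory_allocator_;  // not owned
    };
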
/external/libchrome/base/metrics/

persistent_histogram_allocator_unittest.cc
    39  allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();  [in CreatePersistentHistogramAllocator()]
   140  GlobalHistogramAllocator::Get()->memory_allocator()->Name());  [in TEST_F()]
   146  GlobalHistogramAllocator::Get()->memory_allocator()->Name());  [in TEST_F()]
   152  GlobalHistogramAllocator::Get()->memory_allocator()->Name());  [in TEST_F()]
   208  const_cast<void*>(new_allocator->memory_allocator()->data()),  [in TEST_F()]
   209  new_allocator->memory_allocator()->size(), 0, 0, "", false));  [in TEST_F()]
   254  const_cast<void*>(new_allocator->memory_allocator()->data()),  [in TEST_F()]
   255  new_allocator->memory_allocator()->size(), 0, 0, "", false));  [in TEST_F()]

persistent_histogram_allocator.cc
   255  : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}  [in Iterator()]
   831  PersistentMemoryAllocator* memory_allocator =  [in ReleaseForTesting(), local]
   832  histogram_allocator->memory_allocator();  [in ReleaseForTesting()]
   837  PersistentMemoryAllocator::Iterator iter(memory_allocator);  [in ReleaseForTesting()]
   890  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);  [in DeletePersistentLocation()]

persistent_histogram_allocator.h
   217  PersistentMemoryAllocator* memory_allocator() {  [in memory_allocator(), function]

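On the libchrome side, memory_allocator() exposes the PersistentMemoryAllocator that backs a PersistentHistogramAllocator; the unit-test hits above read its Name(), data() and size() so a second allocator can be pointed at the same memory region. A hedged sketch of that access pattern (assumes a GlobalHistogramAllocator has already been installed for the process):

    #include "base/metrics/persistent_histogram_allocator.h"
    #include "base/metrics/persistent_memory_allocator.h"

    void InspectGlobalHistogramMemory() {
      base::GlobalHistogramAllocator* global =
          base::GlobalHistogramAllocator::Get();
      if (!global)
        return;  // No persistent histogram allocator in this process.

      // The underlying persistent memory region that holds the histograms.
      base::PersistentMemoryAllocator* memory_allocator =
          global->memory_allocator();

      // The tests above pass these values to a second allocator so that it
      // maps the same region.
      const char* name = memory_allocator->Name();
      const void* base = memory_allocator->data();
      size_t size = memory_allocator->size();
      (void)name; (void)base; (void)size;
    }
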
/external/tensorflow/tensorflow/compiler/xla/service/cpu/

cpu_executable.cc
    77  DeviceMemoryAllocator* memory_allocator, int device_ordinal,  [in AllocateBuffers(), argument]
   103  TF_ASSIGN_OR_RETURN((*buffers)[i], memory_allocator->Allocate(  [in AllocateBuffers()]
   296  DeviceMemoryAllocator* memory_allocator = run_options->allocator();  [in ExecuteOnStream(), local]
   300  memory_allocator, stream->parent()->device_ordinal(), &buffers));  [in ExecuteOnStream()]
   310  TF_RETURN_IF_ERROR(DeallocateTempBuffers(memory_allocator, stream, buffers,  [in ExecuteOnStream()]
   328  DeviceMemoryAllocator* memory_allocator = run_options->allocator();  [in ExecuteAsyncOnStream(), local]
   332  memory_allocator, stream->parent()->device_ordinal(), &buffers));  [in ExecuteAsyncOnStream()]
   342  buffers_in_result, memory_allocator, stream]() {  [in ExecuteAsyncOnStream()]
   348  TF_CHECK_OK(DeallocateTempBuffers(memory_allocator, stream, buffers,  [in ExecuteAsyncOnStream()]

parallel_cpu_executable.cc
   327  DeviceMemoryAllocator* memory_allocator, int device_ordinal,  [in AllocateBuffers(), argument]
   353  TF_ASSIGN_OR_RETURN((*buffers)[i], memory_allocator->Allocate(  [in AllocateBuffers()]
   461  DeviceMemoryAllocator* memory_allocator = run_options->allocator();  [in ExecuteOnStream(), local]
   469  memory_allocator, stream->parent()->device_ordinal(), &buffers));  [in ExecuteOnStream()]
   508  TF_RETURN_IF_ERROR(memory_allocator->Deallocate(  [in ExecuteOnStream()]

parallel_cpu_executable.h
    99  DeviceMemoryAllocator* memory_allocator, int device_ordinal,

cpu_executable.h
    99  DeviceMemoryAllocator* memory_allocator, int device_ordinal,

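Both CPU executables above follow the same buffer lifecycle: take the DeviceMemoryAllocator from the run options, Allocate() one device buffer per temporary allocation, execute, then Deallocate() whatever did not escape into the result. A condensed sketch of that flow; the helper name and exact signatures are assumptions based on the call sites above, not the real AllocateBuffers()/DeallocateTempBuffers() API.

    // Sketch of the allocate / execute / deallocate flow seen in
    // cpu_executable.cc and parallel_cpu_executable.cc.
    Status RunWithTempBuffers(const ServiceExecutableRunOptions* run_options,
                              const std::vector<uint64>& buffer_sizes) {
      DeviceMemoryAllocator* memory_allocator = run_options->allocator();
      int device_ordinal = run_options->stream()->parent()->device_ordinal();

      std::vector<se::DeviceMemoryBase> buffers(buffer_sizes.size());
      for (size_t i = 0; i < buffer_sizes.size(); ++i) {
        TF_ASSIGN_OR_RETURN(
            buffers[i],
            memory_allocator->Allocate(device_ordinal, buffer_sizes[i]));
      }

      // ... run the compiled computation against `buffers` ...

      // Temporary buffers not owned by the result are released afterwards,
      // mirroring the DeallocateTempBuffers()/Deallocate() hits above.
      for (se::DeviceMemoryBase& buffer : buffers) {
        TF_RETURN_IF_ERROR(memory_allocator->Deallocate(device_ordinal, &buffer));
      }
      return Status::OK();
    }
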
/external/tensorflow/tensorflow/compiler/jit/

xla_device_context.cc
    39  backend_->memory_allocator()  [in AllocateRaw()]
    49  TF_CHECK_OK(backend_->memory_allocator()->Deallocate(device_ordinal_, &dmem));  [in DeallocateRaw()]

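xla_device_context.cc shows the TensorFlow-side mirror of this: the XLA device's raw allocation hooks simply forward to the backend's memory_allocator(). A rough sketch of that forwarding, written as free functions for brevity; the real code lives in member functions of the device-context classes, and the Allocate() signature and ValueOrDie() handling here are assumptions.

    // Sketch: forwarding raw TensorFlow allocations to the XLA backend
    // allocator, as the AllocateRaw()/DeallocateRaw() hits above do.
    void* AllocateRawSketch(xla::Backend* backend, int device_ordinal,
                            size_t num_bytes) {
      se::DeviceMemoryBase dmem =
          backend->memory_allocator()->Allocate(device_ordinal, num_bytes)
              .ValueOrDie();
      return dmem.opaque();
    }

    void DeallocateRawSketch(xla::Backend* backend, int device_ordinal,
                             void* ptr) {
      se::DeviceMemoryBase dmem(ptr);
      TF_CHECK_OK(backend->memory_allocator()->Deallocate(device_ordinal, &dmem));
    }
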
/external/v8/src/heap/

spaces.cc
   219  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(  [in AllocateRawMemory()]
   230  return isolate_->heap()->memory_allocator()->CommitMemory(start, length,  [in CommitRawMemory()]
   580  if (!heap()->memory_allocator()->CommitMemory(start, length,  [in CommitArea()]
   585  CodeRange* code_range = heap_->memory_allocator()->code_range();  [in CommitArea()]
   591  heap_->memory_allocator()->ZapBlock(start, length);  [in CommitArea()]
   601  CodeRange* code_range = heap_->memory_allocator()->code_range();  [in CommitArea()]
   845  heap()->memory_allocator()->ShrinkChunk(this, unused);  [in AllocateChunk()]
  1233  heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);  [in AllocateChunk()]
  1341  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());  [in AllocateChunk()]
  1441  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);  [in AllocateChunk()]
  [all …]

heap.cc
   226  return static_cast<size_t>(memory_allocator()->SizeExecutable());  [in CommittedMemoryExecutable()]
   286  if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {  [in SelectGarbageCollector()]
   332  memory_allocator()->Size() / KB,  [in PrintShortHeapStatistics()]
   333  memory_allocator()->Available() / KB);  [in PrintShortHeapStatistics()]
  3413  DCHECK(!memory_allocator()->code_range()->valid() ||  [in AllocateCode()]
  3414  memory_allocator()->code_range()->contains(code->address()) ||  [in AllocateCode()]
  3439  DCHECK(!memory_allocator()->code_range()->valid() ||  [in CopyCode()]
  3440  memory_allocator()->code_range()->contains(code->address()) ||  [in CopyCode()]
  4606  memory_allocator()->ReportStatistics();  [in ReportHeapStatistics()]
  4675  if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {  [in Contains()]
  [all …]

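In V8, Heap::memory_allocator() returns the MemoryAllocator that owns every committed chunk, and the heap.cc and statistics-extension.cc hits read its Size(), SizeExecutable(), Available() and MaxAvailable() counters. A small sketch of reading those counters; note this goes through v8::internal headers and is not part of the public embedder API, and the counter return types are left to auto rather than asserted.

    // Sketch: committed-memory counters via Heap::memory_allocator(), as used
    // by CommittedMemoryExecutable() and statistics-extension.cc in this listing.
    #include <cstdio>

    void PrintCommittedMemory(v8::internal::Isolate* isolate) {
      v8::internal::Heap* heap = isolate->heap();
      auto committed = heap->memory_allocator()->Size();
      auto committed_executable = heap->memory_allocator()->SizeExecutable();
      auto available = heap->memory_allocator()->Available();
      std::printf("committed: %zu (executable: %zu), available: %zu\n",
                  static_cast<size_t>(committed),
                  static_cast<size_t>(committed_executable),
                  static_cast<size_t>(available));
    }
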
/external/tensorflow/tensorflow/compiler/xla/client/

local_client.cc
   156  run_options.set_allocator(backend_->memory_allocator());  [in Run()]
   272  allocator = backend().memory_allocator();  [in LiteralToShapedBuffer()]

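local_client.cc above, together with hlo_runner.cc, xla_hlo_profile_test.cc and local_computation_builder.cc further down, all wire the allocator the same way: the Backend owns a default DeviceMemoryAllocator, and callers hand it to ExecutableRunOptions before executing. A minimal sketch of that wiring; the helper function is invented for illustration, while set_allocator() and memory_allocator() are the calls visible in the hits.

    // Sketch of the run-options wiring repeated across the XLA client, runner,
    // test and Python-binding hits in this listing.
    xla::ExecutableRunOptions MakeRunOptions(const xla::Backend& backend) {
      xla::ExecutableRunOptions run_options;
      // The backend's default DeviceMemoryAllocator serves the result and
      // temporary buffers for the execution.
      run_options.set_allocator(backend.memory_allocator());
      return run_options;
    }
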
/external/tensorflow/tensorflow/compiler/xla/service/

shaped_buffer.h
   137  DeviceMemoryAllocator* memory_allocator() const { return allocator_; }  [in memory_allocator(), function]

backend.h
    84  DeviceMemoryAllocator* memory_allocator() const {  [in memory_allocator(), function]

allocation_tracker.cc
   178  TF_RETURN_IF_ERROR(backend_->memory_allocator()->Deallocate(  [in DecrementRefCount()]

hlo_runner.cc
   135  run_options.set_allocator(backend().memory_allocator());  [in ExecuteInternal()]

/external/tensorflow/tensorflow/compiler/xla/tests/

xla_hlo_profile_test.cc
   127  DeviceMemoryAllocator* allocator = backend->memory_allocator();  [in ExecuteAndFetchProfile()]
   159  exec_run_options.set_allocator(backend->memory_allocator());  [in ExecuteAndFetchProfile()]

/external/tensorflow/tensorflow/compiler/xla/python/

local_computation_builder.cc
   104  client->backend().memory_allocator());  [in ToBuffer()]
   199  options.set_allocator(client->backend().memory_allocator());  [in Execute()]
   244  options.set_allocator(client->backend().memory_allocator());  [in ExecuteWithShapedBuffers()]

/external/v8/src/extensions/

statistics-extension.cc
   120  {heap->memory_allocator()->Size(), "total_committed_bytes"},  [in GetCounters()]

/external/v8/src/x64/

assembler-x64-inl.h
    82  entry - isolate()->heap()->memory_allocator()->code_range()->start()));  [in emit_runtime_entry()]
   323  isolate()->heap()->memory_allocator()->code_range()->start();  [in runtime_entry_at()]