/external/tensorflow/tensorflow/core/framework/ |
D | memory_types_test.cc |
    71  MemoryTypeVector({DEVICE_MEMORY, DEVICE_MEMORY, HOST_MEMORY, HOST_MEMORY,  in TEST()
    72  HOST_MEMORY, DEVICE_MEMORY, DEVICE_MEMORY,  in TEST()
    73  DEVICE_MEMORY, HOST_MEMORY, HOST_MEMORY, HOST_MEMORY}),  in TEST()
    76  EXPECT_EQ(MemoryTypeVector({DEVICE_MEMORY, DEVICE_MEMORY, DEVICE_MEMORY,  in TEST()
    77  DEVICE_MEMORY, DEVICE_MEMORY, DEVICE_MEMORY}),  in TEST()
    83  MemoryTypeVector({HOST_MEMORY, DEVICE_MEMORY, HOST_MEMORY, HOST_MEMORY,  in TEST()
    88  DEVICE_MEMORY, DEVICE_MEMORY, DEVICE_MEMORY}),  in TEST()
|
D | memory_types.cc |
    74  : DEVICE_MEMORY;  in MTypeFromDType()
    78  return DataTypeAlwaysOnHost(dtype) ? HOST_MEMORY : DEVICE_MEMORY;  in MTypeFromDTypeIntsOnDevice()
   137  inp_mtypes->resize(GetTotal(inp_names), DEVICE_MEMORY);  in MemoryTypesForNode()
   138  out_mtypes->resize(GetTotal(out_names), DEVICE_MEMORY);  in MemoryTypesForNode()
|
D | types.h |
    48  DEVICE_MEMORY = 0,  enumerator
|
D | op_kernel.cc |
   603  type, shape, DEVICE_MEMORY, allocator_attr);  in forward_input_or_allocate_temp()
|
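The framework entries above are where TensorFlow defines the MemoryType enum (types.h) and maps a tensor's dtype onto it (memory_types.cc). The following is a minimal, self-contained sketch of that mapping; the DataType values, the always-on-host set, and the int32 special case in MTypeFromDType() are simplified stand-ins inferred from the snippets and the name of the IntsOnDevice variant, not code taken from the listing.

    #include <iostream>

    // Memory placement choices, mirroring the enumerator in types.h above.
    enum MemoryType {
      DEVICE_MEMORY = 0,  // tensor lives in the device's (e.g. GPU) memory
      HOST_MEMORY = 1,    // tensor lives in CPU-accessible memory
    };

    // Simplified stand-in for tensorflow::DataType; not the real enum.
    enum DataType { DT_FLOAT, DT_INT32, DT_STRING, DT_RESOURCE };

    // Stand-in for DataTypeAlwaysOnHost(): types that only make sense on the host.
    bool DataTypeAlwaysOnHost(DataType dtype) {
      return dtype == DT_STRING || dtype == DT_RESOURCE;
    }

    // Sketch of MTypeFromDType(): int32 and host-only types map to HOST_MEMORY
    // (the int32 rule is an assumption inferred from the IntsOnDevice variant).
    MemoryType MTypeFromDType(DataType dtype) {
      return (dtype == DT_INT32 || DataTypeAlwaysOnHost(dtype)) ? HOST_MEMORY
                                                                : DEVICE_MEMORY;
    }

    // Mirrors the MTypeFromDTypeIntsOnDevice() snippet: same rule, but int32
    // is allowed to stay on the device.
    MemoryType MTypeFromDTypeIntsOnDevice(DataType dtype) {
      return DataTypeAlwaysOnHost(dtype) ? HOST_MEMORY : DEVICE_MEMORY;
    }

    int main() {
      std::cout << MTypeFromDType(DT_FLOAT) << " "                // 0 (DEVICE_MEMORY)
                << MTypeFromDType(DT_INT32) << " "                // 1 (HOST_MEMORY)
                << MTypeFromDTypeIntsOnDevice(DT_INT32) << "\n";  // 0 (DEVICE_MEMORY)
      return 0;
    }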
/external/swiftshader/src/Vulkan/ |
D | VkCommandPool.cpp |
    27  DEVICE_MEMORY, GetAllocationScope());  in CommandPool()
    37  vk::destroy(commandBuffer, DEVICE_MEMORY);  in destroy()
    41  vk::deallocate(commandBuffers, DEVICE_MEMORY);  in destroy()
    55  DEVICE_MEMORY, DispatchableCommandBuffer::GetAllocationScope());  in allocateCommandBuffers()
    66  vk::destroy(pCommandBuffers[j], DEVICE_MEMORY);  in allocateCommandBuffers()
    86  vk::destroy(pCommandBuffers[i], DEVICE_MEMORY);  in freeCommandBuffers()
|
D | VkPipelineCache.cpp |
    25  allocate(sizeof(VkSpecializationInfo), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));  in SpecializationInfo()
    34  allocate(entriesSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));  in SpecializationInfo()
    42  void *data = allocate(specializationInfo->dataSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);  in SpecializationInfo()
    57  deallocate(const_cast<VkSpecializationMapEntry *>(info->pMapEntries), DEVICE_MEMORY);  in operator ()()
    58  deallocate(const_cast<void *>(info->pData), DEVICE_MEMORY);  in operator ()()
    59  deallocate(info, DEVICE_MEMORY);  in operator ()()
|
D | VkDeviceMemory.cpp |
   110  void *buffer = vk::allocate(size, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);  in allocate()
   122  vk::deallocate(buffer, DEVICE_MEMORY);  in deallocate()
|
D | VkQueue.cpp |
    44  …vk::allocate(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::DEVICE_MEMORY, vk::Fence::GetAllocation…  in DeepCopySubmitInfo()
   210  vk::deallocate(v.first, DEVICE_MEMORY);  in garbageCollect()
|
D | VkObject.hpp |
    42  static constexpr VkAllocationCallbacks *DEVICE_MEMORY = nullptr;  variable
|
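In the SwiftShader entries, DEVICE_MEMORY is not a memory-type enum at all: VkObject.hpp defines it as a null VkAllocationCallbacks pointer, so the vk::allocate()/deallocate() calls above simply opt out of application-supplied allocation callbacks. Below is a rough sketch of that convention; the trimmed-down callback struct and the allocate()/deallocate() bodies are made-up stand-ins for SwiftShader's implementation, not its actual code.

    #include <cstddef>
    #include <cstdlib>

    // Trimmed-down stand-in for the real VkAllocationCallbacks struct.
    struct VkAllocationCallbacks {
      void *(*pfnAllocation)(std::size_t size, std::size_t alignment);
      void (*pfnFree)(void *memory);
    };

    namespace vk {

    constexpr std::size_t REQUIRED_MEMORY_ALIGNMENT = 16;
    static constexpr VkAllocationCallbacks *DEVICE_MEMORY = nullptr;  // as in VkObject.hpp

    void *allocate(std::size_t bytes, std::size_t alignment,
                   const VkAllocationCallbacks *pAllocator) {
      if (pAllocator) {
        return pAllocator->pfnAllocation(bytes, alignment);  // application-provided path
      }
      // Driver-internal path: round the size up so aligned_alloc() accepts it.
      return std::aligned_alloc(alignment,
                                ((bytes + alignment - 1) / alignment) * alignment);
    }

    void deallocate(void *memory, const VkAllocationCallbacks *pAllocator) {
      if (pAllocator) {
        pAllocator->pfnFree(memory);
        return;
      }
      std::free(memory);
    }

    }  // namespace vk

    int main() {
      // Same call shape as the VkDeviceMemory.cpp / VkPipelineCache.cpp lines above.
      void *buffer = vk::allocate(256, vk::REQUIRED_MEMORY_ALIGNMENT, vk::DEVICE_MEMORY);
      vk::deallocate(buffer, vk::DEVICE_MEMORY);
      return 0;
    }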
/external/swiftshader/src/WSI/ |
D | VkSurfaceKHR.cpp |
    44  …= reinterpret_cast<VkImage *>(allocate(sizeof(VkImage), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));  in allocateImage()
    53  deallocate(vkImagePtr, DEVICE_MEMORY);  in allocateImage()
    58  deallocate(vkImagePtr, DEVICE_MEMORY);  in allocateImage()
    68  allocate(sizeof(VkDeviceMemory), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));  in allocateAndBindImageMemory()
    77  deallocate(vkDeviceMemoryPtr, DEVICE_MEMORY);  in allocateAndBindImageMemory()
    85  deallocate(vkDeviceMemoryPtr, DEVICE_MEMORY);  in allocateAndBindImageMemory()
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | memory_types.cc |
    83  DEVICE_MEMORY);  in ProcessMemoryTypes()
    85  DEVICE_MEMORY);  in ProcessMemoryTypes()
   164  if (((sm == HOST_MEMORY) && (dm == DEVICE_MEMORY)) ||  in EnsureMemoryTypes()
   165  ((sm == DEVICE_MEMORY) && (dm == HOST_MEMORY))) {  in EnsureMemoryTypes()
|
D | memory_types_test.cc |
    82  EXPECT_EQ(memory_type, DEVICE_MEMORY);  in TEST()
|
D | constant_folding.cc |
   505  (memory_type == DEVICE_MEMORY && is_int32)) {  in ReplaceTensorWithConstant()
|
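The common_runtime pass above compares, for every graph edge, the memory type the producer writes (sm) with the one the consumer expects (dm); any HOST/DEVICE disagreement is what forces a transfer to be spliced into the graph. A small sketch of that per-edge test follows, with a hypothetical Edge struct standing in for TensorFlow's graph types.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum MemoryType { DEVICE_MEMORY = 0, HOST_MEMORY = 1 };

    // Hypothetical stand-in for a graph edge: where the producer writes the
    // tensor (sm) and where the consumer expects to read it (dm).
    struct Edge {
      MemoryType sm;
      MemoryType dm;
    };

    // Same condition as the EnsureMemoryTypes() lines above.
    bool NeedsTransfer(const Edge &e) {
      return ((e.sm == HOST_MEMORY) && (e.dm == DEVICE_MEMORY)) ||
             ((e.sm == DEVICE_MEMORY) && (e.dm == HOST_MEMORY));
    }

    int main() {
      std::vector<Edge> edges = {{DEVICE_MEMORY, DEVICE_MEMORY},
                                 {HOST_MEMORY, DEVICE_MEMORY}};
      for (std::size_t i = 0; i < edges.size(); ++i) {
        std::printf("edge %zu needs transfer: %s\n", i,
                    NeedsTransfer(edges[i]) ? "yes" : "no");
      }
      return 0;
    }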
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_kernel_creator_test.cc |
   117  EXPECT_EQ(DEVICE_MEMORY, kernel_->input_memory_types()[0]);  in TEST_F()
   122  EXPECT_EQ(DEVICE_MEMORY, kernel_->output_memory_types()[0]);  in TEST_F()
|
D | xla_kernel_creator_util.cc |
   171  MemoryTypeVector input_memory_types(fbody->arg_types.size(), DEVICE_MEMORY);  in CreateXlaKernel()
   210  MemoryTypeVector output_memory_types(fbody->ret_types.size(), DEVICE_MEMORY);  in CreateXlaKernel()
|
D | partially_decluster_pass.cc |
    84  if (output_mtypes[e->src_output()] == DEVICE_MEMORY) {  in FindNodesToDecluster()
|
D | build_xla_ops_pass.cc |
   434  if (source_output_mem_types[e->src_output()] == DEVICE_MEMORY) {  in PredicateInt32Inputs()
|
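The JIT entries share one pattern: build a MemoryTypeVector that defaults every slot to DEVICE_MEMORY (xla_kernel_creator_util.cc), then consult the slot addressed by an edge's src_output() to decide whether a particular output is device-resident (partially_decluster_pass.cc, build_xla_ops_pass.cc). A compact sketch under those assumptions, with a hypothetical Edge stand-in and an illustrative HOST_MEMORY override that is not taken from the listing:

    #include <cstdio>
    #include <vector>

    enum MemoryType { DEVICE_MEMORY = 0, HOST_MEMORY = 1 };
    using MemoryTypeVector = std::vector<MemoryType>;

    struct Edge { int src_output; };  // hypothetical stand-in for a graph edge

    int main() {
      // Default everything to DEVICE_MEMORY, as the CreateXlaKernel() lines do,
      // then flip one slot to HOST_MEMORY to mimic a host-readable output.
      MemoryTypeVector output_mtypes(3, DEVICE_MEMORY);
      output_mtypes[1] = HOST_MEMORY;  // illustrative override

      Edge e{1};
      if (output_mtypes[e.src_output] == DEVICE_MEMORY) {
        std::printf("output %d stays on the device\n", e.src_output);
      } else {
        std::printf("output %d is host-resident\n", e.src_output);
      }
      return 0;
    }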
/external/tensorflow/tensorflow/core/kernels/ |
D | assign_op.h |
   104  rhs.dtype(), rhs.shape(), DEVICE_MEMORY, attr);  in Compute()
|
D | strided_slice_op.cc |
   311  0, 0, input.dtype(), shape, DEVICE_MEMORY, AllocatorAttributes());  in Compute()
|
D | list_kernels.cc |
   466  DEVICE_MEMORY /* input is always on DEVICE_MEMORY */, attr);  in Compute()
|
D | scatter_nd_op.cc |
   200  0, 0, input.dtype(), shape, DEVICE_MEMORY, AllocatorAttributes());  in Compute()
|
D | resource_variable_ops.cc |
   468  DEVICE_MEMORY /* HOST_MEMORY is only reserved for special cases */,  in Compute()
|
D | list_kernels.h |
   916  DEVICE_MEMORY /* input is always on DEVICE_MEMORY */, attr);  in Compute()
|
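Each kernel call site above passes DEVICE_MEMORY into a "forward input or allocate" helper: reuse an existing input buffer if it already lives in the requested memory space and is exclusively owned, otherwise allocate a fresh one. The sketch below only illustrates that idea with plain shared_ptr-managed buffers; Buffer and ForwardInputOrAllocate() are stand-ins, not TensorFlow's Tensor or OpKernelContext API.

    #include <cstddef>
    #include <cstdio>
    #include <memory>

    enum MemoryType { DEVICE_MEMORY = 0, HOST_MEMORY = 1 };

    struct Buffer {
      MemoryType mtype;
      std::size_t bytes;
    };

    // Reuse the candidate input when its memory space matches, it is big enough,
    // and nothing else holds a reference; otherwise allocate a new buffer.
    std::shared_ptr<Buffer> ForwardInputOrAllocate(std::shared_ptr<Buffer> &input,
                                                   MemoryType wanted,
                                                   std::size_t bytes) {
      if (input && input->mtype == wanted && input->bytes >= bytes &&
          input.use_count() == 1) {
        return std::move(input);  // forwarded: the caller's input slot is now empty
      }
      return std::make_shared<Buffer>(Buffer{wanted, bytes});  // fresh allocation
    }

    int main() {
      auto in = std::make_shared<Buffer>(Buffer{DEVICE_MEMORY, 1024});
      auto out = ForwardInputOrAllocate(in, DEVICE_MEMORY, 1024);
      std::printf("input forwarded: %s\n", in == nullptr ? "yes" : "no");
      return 0;
    }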
/external/swiftshader/src/Device/ |
D | Renderer.cpp |
   173  …return vk::allocate(sizeof(Renderer), alignof(Renderer), vk::DEVICE_MEMORY, VK_SYSTEM_ALLOCATION_S…  in operator new()
   178  vk::deallocate(mem, vk::DEVICE_MEMORY);  in operator delete()
|
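Renderer.cpp routes the Renderer object's own storage through the same vk::allocate()/deallocate() pair by overloading the class's operator new and operator delete. The following is a self-contained sketch of that pattern only; the vk namespace here is a simplified stand-in for SwiftShader's allocator, and the allocation-scope argument visible in the truncated snippet is omitted.

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    namespace vk {

    // Simplified stand-ins for SwiftShader's allocator and its null-callbacks tag.
    static constexpr const void *DEVICE_MEMORY = nullptr;

    void *allocate(std::size_t bytes, std::size_t alignment, const void * /*pAllocator*/) {
      // Round the size up so aligned_alloc() accepts it.
      return std::aligned_alloc(alignment,
                                ((bytes + alignment - 1) / alignment) * alignment);
    }

    void deallocate(void *memory, const void * /*pAllocator*/) { std::free(memory); }

    }  // namespace vk

    class Renderer {
     public:
      // Class-specific operator new/delete: every Renderer instance is carved
      // out of the driver's allocator instead of the global heap.
      void *operator new(std::size_t size) {
        return vk::allocate(size, alignof(Renderer), vk::DEVICE_MEMORY);
      }
      void operator delete(void *mem) { vk::deallocate(mem, vk::DEVICE_MEMORY); }

     private:
      double state_[8];  // placeholder for the real renderer state
    };

    int main() {
      Renderer *renderer = new Renderer();  // goes through vk::allocate()
      delete renderer;                      // goes through vk::deallocate()
      return 0;
    }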