1 // Copyright (C) 2018 The Android Open Source Project
2 // Copyright (C) 2018 Google Inc.
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 // http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 #include "HostVisibleMemoryVirtualization.h"
16
17 #include "android/base/AndroidSubAllocator.h"
18
19 #include "Resources.h"
20 #include "VkEncoder.h"
21
22 #include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
23
24 #include <log/log.h>
25
26 #include <set>
27
28 #if defined(__ANDROID__) || defined(__linux__)
29 #include <unistd.h>
30 #include <errno.h>
31 #endif
32
33 #include <sys/mman.h>
34
35 #if !defined(HOST_BUILD) && defined(VIRTIO_GPU)
36 #include <xf86drm.h>
37 #endif
38
39 using android::base::guest::SubAllocator;
40
41 namespace goldfish_vk {
42
canFitVirtualHostVisibleMemoryInfo(const VkPhysicalDeviceMemoryProperties * memoryProperties)43 bool canFitVirtualHostVisibleMemoryInfo(
44 const VkPhysicalDeviceMemoryProperties* memoryProperties) {
45 uint32_t typeCount =
46 memoryProperties->memoryTypeCount;
47 uint32_t heapCount =
48 memoryProperties->memoryHeapCount;
49
50 bool canFit = true;
51
52 if (typeCount == VK_MAX_MEMORY_TYPES) {
53 canFit = false;
54 ALOGE("Underlying device has no free memory types");
55 }
56
57 if (heapCount == VK_MAX_MEMORY_HEAPS) {
58 canFit = false;
59 ALOGE("Underlying device has no free memory heaps");
60 }
61
62 uint32_t numFreeMemoryTypes = VK_MAX_MEMORY_TYPES - typeCount;
63 uint32_t hostVisibleMemoryTypeCount = 0;
64
65 if (hostVisibleMemoryTypeCount > numFreeMemoryTypes) {
66 ALOGE("Underlying device has too many host visible memory types (%u)"
67 "and not enough free types (%u)",
68 hostVisibleMemoryTypeCount, numFreeMemoryTypes);
69 canFit = false;
70 }
71
72 return canFit;
73 }
74
// Populates |info_out| with a "guest view" of the host's memory properties
// in which every host-visible memory type is mirrored by a new virtual
// memory type backed by one shared virtual heap. The original types keep
// their slots but are stripped of host-visibility flags, so guest
// allocations that want host-visible memory are steered to the virtual
// types, whose index mappings to/from the host are recorded here.
// Idempotent: only the first call fills in |info_out|.
void initHostVisibleMemoryVirtualizationInfo(
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceMemoryProperties* memoryProperties,
    const EmulatorFeatureInfo* featureInfo,
    HostVisibleMemoryVirtualizationInfo* info_out) {

    if (info_out->initialized) return;

    // Keep an untouched copy of the host's properties for fallback paths.
    info_out->hostMemoryProperties = *memoryProperties;
    info_out->initialized = true;

    info_out->memoryPropertiesSupported =
        canFitVirtualHostVisibleMemoryInfo(memoryProperties);

    info_out->directMemSupported = featureInfo->hasDirectMem;
    info_out->virtioGpuNextSupported = featureInfo->hasVirtioGpuNext;

    // Virtualization requires both free type/heap slots and at least one
    // transport for mapping host memory into the guest (direct mem or
    // virtio-gpu next).
    if (!info_out->memoryPropertiesSupported ||
        (!info_out->directMemSupported &&
         !info_out->virtioGpuNextSupported)) {
        info_out->virtualizationSupported = false;
        return;
    }

    info_out->virtualizationSupported = true;

    info_out->physicalDevice = physicalDevice;
    // Start from a copy of the host properties and edit it in place below.
    info_out->guestMemoryProperties = *memoryProperties;

    uint32_t typeCount =
        memoryProperties->memoryTypeCount;
    uint32_t heapCount =
        memoryProperties->memoryHeapCount;

    // New virtual types are appended after the host's types; all of them
    // share one new heap appended after the host's heaps (see below).
    uint32_t firstFreeTypeIndex = typeCount;
    uint32_t firstFreeHeapIndex = heapCount;

    for (uint32_t i = 0; i < typeCount; ++i) {

        // Set up identity mapping and not-both
        // by default, to be edited later.
        info_out->memoryTypeIndexMappingToHost[i] = i;
        info_out->memoryHeapIndexMappingToHost[i] = i;

        info_out->memoryTypeIndexMappingFromHost[i] = i;
        info_out->memoryHeapIndexMappingFromHost[i] = i;

        info_out->memoryTypeBitsShouldAdvertiseBoth[i] = false;

        const auto& type = memoryProperties->memoryTypes[i];

        if (type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
            uint32_t heapIndex = type.heapIndex;

            auto& guestMemoryType =
                info_out->guestMemoryProperties.memoryTypes[i];

            // Safe to index at firstFreeTypeIndex: the
            // canFitVirtualHostVisibleMemoryInfo() check above guarantees
            // free slots remain.
            auto& newVirtualMemoryType =
                info_out->guestMemoryProperties.memoryTypes[firstFreeTypeIndex];

            auto& newVirtualMemoryHeap =
                info_out->guestMemoryProperties.memoryHeaps[firstFreeHeapIndex];

            // Remove all references to host visible in the guest memory type at
            // index i, while transferring them to the new virtual memory type.
            newVirtualMemoryType = type;

            // Set this memory type to have a separate heap.
            newVirtualMemoryType.heapIndex = firstFreeHeapIndex;

            // The virtual type is host-visible but not device-local.
            newVirtualMemoryType.propertyFlags =
                type.propertyFlags &
                ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

            // Conversely, the original slot loses every host-* flag.
            guestMemoryType.propertyFlags =
                type.propertyFlags & \
                ~(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                  VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                  VK_MEMORY_PROPERTY_HOST_CACHED_BIT);

            // In the corresponding new memory heap, copy the information over,
            // remove device local flags, and resize it based on what is
            // supported by the PCI device.
            newVirtualMemoryHeap =
                memoryProperties->memoryHeaps[heapIndex];
            newVirtualMemoryHeap.flags =
                newVirtualMemoryHeap.flags &
                ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT);

            // TODO: Figure out how to support bigger sizes
            newVirtualMemoryHeap.size = VIRTUAL_HOST_VISIBLE_HEAP_SIZE;

            // Record bidirectional index remappings between the virtual
            // slot and the original host slot.
            info_out->memoryTypeIndexMappingToHost[firstFreeTypeIndex] = i;
            info_out->memoryHeapIndexMappingToHost[firstFreeHeapIndex] = i;

            info_out->memoryTypeIndexMappingFromHost[i] = firstFreeTypeIndex;
            info_out->memoryHeapIndexMappingFromHost[i] = firstFreeHeapIndex;

            // Was the original memory type also a device local type? If so,
            // advertise both types in resulting type bits.
            info_out->memoryTypeBitsShouldAdvertiseBoth[i] =
                type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT ||
                type.propertyFlags == 0;

            ++firstFreeTypeIndex;

            // Explicitly only create one new heap.
            // ++firstFreeHeapIndex;
        }
    }

    // One extra heap was created (shared by all new virtual types), hence
    // heapCount advances by exactly one.
    info_out->guestMemoryProperties.memoryTypeCount = firstFreeTypeIndex;
    info_out->guestMemoryProperties.memoryHeapCount = firstFreeHeapIndex + 1;

    // Zero out the unused tail of the type table so stale host data is
    // never advertised to the guest.
    for (uint32_t i = info_out->guestMemoryProperties.memoryTypeCount; i < VK_MAX_MEMORY_TYPES; ++i) {
        memset(&info_out->guestMemoryProperties.memoryTypes[i],
               0x0, sizeof(VkMemoryType));
    }
}
194
isHostVisibleMemoryTypeIndexForGuest(const HostVisibleMemoryVirtualizationInfo * info,uint32_t index)195 bool isHostVisibleMemoryTypeIndexForGuest(
196 const HostVisibleMemoryVirtualizationInfo* info,
197 uint32_t index) {
198
199 const auto& props =
200 info->virtualizationSupported ?
201 info->guestMemoryProperties :
202 info->hostMemoryProperties;
203
204 return props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
205 }
206
isDeviceLocalMemoryTypeIndexForGuest(const HostVisibleMemoryVirtualizationInfo * info,uint32_t index)207 bool isDeviceLocalMemoryTypeIndexForGuest(
208 const HostVisibleMemoryVirtualizationInfo* info,
209 uint32_t index) {
210
211 const auto& props =
212 info->virtualizationSupported ?
213 info->guestMemoryProperties :
214 info->hostMemoryProperties;
215
216 return props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
217 }
218
finishHostMemAllocInit(VkEncoder *,VkDevice device,uint32_t memoryTypeIndex,VkDeviceSize nonCoherentAtomSize,VkDeviceSize allocSize,VkDeviceSize mappedSize,uint8_t * mappedPtr,HostMemAlloc * out)219 VkResult finishHostMemAllocInit(
220 VkEncoder*,
221 VkDevice device,
222 uint32_t memoryTypeIndex,
223 VkDeviceSize nonCoherentAtomSize,
224 VkDeviceSize allocSize,
225 VkDeviceSize mappedSize,
226 uint8_t* mappedPtr,
227 HostMemAlloc* out) {
228
229 out->device = device;
230 out->memoryTypeIndex = memoryTypeIndex;
231 out->nonCoherentAtomSize = nonCoherentAtomSize;
232 out->allocSize = allocSize;
233 out->mappedSize = mappedSize;
234 out->mappedPtr = mappedPtr;
235
236 // because it's not just nonCoherentAtomSize granularity,
237 // people will also use it for uniform buffers, images, etc.
238 // that need some bigger alignment
239 // #define HIGHEST_BUFFER_OR_IMAGE_ALIGNMENT 1024
240 // bug: 145153816
241 // HACK: Make it 65k so yuv images are happy on vk cts 1.2.1
242 // TODO: Use a munmap/mmap MAP_FIXED scheme to realign memories
243 // if it's found that the buffer or image bind alignment will be violated
244 #define HIGHEST_BUFFER_OR_IMAGE_ALIGNMENT 65536
245
246 uint64_t neededPageSize = out->nonCoherentAtomSize;
247 if (HIGHEST_BUFFER_OR_IMAGE_ALIGNMENT >
248 neededPageSize) {
249 neededPageSize = HIGHEST_BUFFER_OR_IMAGE_ALIGNMENT;
250 }
251
252 out->subAlloc = new
253 SubAllocator(
254 out->mappedPtr,
255 out->mappedSize,
256 neededPageSize);
257
258 out->initialized = true;
259 out->initResult = VK_SUCCESS;
260 return VK_SUCCESS;
261 }
262
// Tears down a HostMemAlloc: unmaps/releases any virtio-gpu DRM resources
// (guest-side), frees the host VkDeviceMemory via the encoder, and deletes
// the owned SubAllocator. Safe to call on a HostMemAlloc whose init failed
// or never ran (it returns early in those cases, after the DRM cleanup).
// |doLock| is forwarded to the encoder's free calls.
void destroyHostMemAlloc(
    bool freeMemorySyncSupported,
    VkEncoder* enc,
    VkDevice device,
    HostMemAlloc* toDestroy,
    bool doLock) {
#if !defined(HOST_BUILD) && defined(VIRTIO_GPU)
    // Guest virtio-gpu path: unmap the guest mapping and close the GEM
    // buffer object before releasing the host-side memory.
    if (toDestroy->rendernodeFd >= 0) {

        if (toDestroy->memoryAddr) {
            int ret = munmap((void*)toDestroy->memoryAddr, toDestroy->memorySize);
            if (ret != 0) {
                // Best effort: log and continue so the rest of the
                // teardown still runs.
                ALOGE("%s: fail to unmap addr = 0x%" PRIx64", size = %d, ret = "
                      "%d, errno = %d", __func__, toDestroy->memoryAddr,
                      (int32_t)toDestroy->memorySize, ret, errno);
            }
        }

        if (toDestroy->boCreated) {
            ALOGV("%s: trying to destroy bo = %u\n", __func__,
                  toDestroy->boHandle);
            struct drm_gem_close drmGemClose = {};
            drmGemClose.handle = toDestroy->boHandle;
            int ret = drmIoctl(toDestroy->rendernodeFd, DRM_IOCTL_GEM_CLOSE, &drmGemClose);
            if (ret != 0) {
                ALOGE("%s: fail to close gem = %u, ret = %d, errno = %d\n",
                      __func__, toDestroy->boHandle, ret, errno);
            } else {
                ALOGV("%s: successfully close gem = %u, ret = %d\n", __func__,
                      toDestroy->boHandle, ret);
            }
        }
    }
#endif

    // Nothing past this point was set up unless finishHostMemAllocInit()
    // completed successfully.
    if (toDestroy->initResult != VK_SUCCESS) return;
    if (!toDestroy->initialized) return;


    if (freeMemorySyncSupported) {
        enc->vkFreeMemorySyncGOOGLE(device, toDestroy->memory, nullptr, doLock);
    } else {
        enc->vkFreeMemory(device, toDestroy->memory, nullptr, doLock);
    }

    // Owned by this HostMemAlloc (created in finishHostMemAllocInit).
    // NOTE(review): the pointer is not nulled afterwards — callers must not
    // reuse |toDestroy| without re-initializing it.
    delete toDestroy->subAlloc;
}
310
subAllocHostMemory(HostMemAlloc * alloc,const VkMemoryAllocateInfo * pAllocateInfo,SubAlloc * out)311 void subAllocHostMemory(
312 HostMemAlloc* alloc,
313 const VkMemoryAllocateInfo* pAllocateInfo,
314 SubAlloc* out) {
315
316 VkDeviceSize mappedSize =
317 alloc->nonCoherentAtomSize * (
318 (pAllocateInfo->allocationSize +
319 alloc->nonCoherentAtomSize - 1) /
320 alloc->nonCoherentAtomSize);
321
322 ALOGV("%s: alloc size %u mapped size %u ncaSize %u\n", __func__,
323 (unsigned int)pAllocateInfo->allocationSize,
324 (unsigned int)mappedSize,
325 (unsigned int)alloc->nonCoherentAtomSize);
326
327 void* subMapped = alloc->subAlloc->alloc(mappedSize);
328 out->mappedPtr = (uint8_t*)subMapped;
329
330 out->subAllocSize = pAllocateInfo->allocationSize;
331 out->subMappedSize = mappedSize;
332
333 out->baseMemory = alloc->memory;
334 out->baseOffset = alloc->subAlloc->getOffset(subMapped);
335
336 out->subMemory = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
337 out->subAlloc = alloc->subAlloc;
338 out->isDeviceAddressMemoryAllocation = alloc->isDeviceAddressMemoryAllocation;
339 out->memoryTypeIndex = alloc->memoryTypeIndex;
340 }
341
subFreeHostMemory(SubAlloc * toFree)342 bool subFreeHostMemory(SubAlloc* toFree) {
343 delete_goldfish_VkDeviceMemory(toFree->subMemory);
344 toFree->subAlloc->free(toFree->mappedPtr);
345 bool nowEmpty = toFree->subAlloc->empty();
346 if (nowEmpty) {
347 ALOGV("%s: We have an empty suballoc, time to free the block perhaps?\n", __func__);
348 }
349 memset(toFree, 0x0, sizeof(SubAlloc));
350 return nowEmpty;
351 }
352
canSubAlloc(android::base::guest::SubAllocator * subAlloc,VkDeviceSize size)353 bool canSubAlloc(android::base::guest::SubAllocator* subAlloc, VkDeviceSize size) {
354 auto ptr = subAlloc->alloc(size);
355 if (!ptr) return false;
356 subAlloc->free(ptr);
357 return true;
358 }
359
isNoFlagsMemoryTypeIndexForGuest(const HostVisibleMemoryVirtualizationInfo * info,uint32_t index)360 bool isNoFlagsMemoryTypeIndexForGuest(
361 const HostVisibleMemoryVirtualizationInfo* info,
362 uint32_t index) {
363 const auto& props =
364 info->virtualizationSupported ?
365 info->guestMemoryProperties :
366 info->hostMemoryProperties;
367 return props.memoryTypes[index].propertyFlags == 0;
368 }
369
370
371 } // namespace goldfish_vk
372