//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// BufferVk.h:
//    Defines the class interface for BufferVk, implementing BufferImpl.
//

#ifndef LIBANGLE_RENDERER_VULKAN_BUFFERVK_H_
#define LIBANGLE_RENDERER_VULKAN_BUFFERVK_H_

#include "libANGLE/Buffer.h"
#include "libANGLE/Observer.h"
#include "libANGLE/renderer/BufferImpl.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"

namespace rx
{
typedef gl::Range<VkDeviceSize> RangeDeviceSize;

// Conversion buffers hold translated index and vertex data.
class ConversionBuffer
{
  public:
    ConversionBuffer() : mEntireBufferDirty(true)
    {
        mData = std::make_unique<vk::BufferHelper>();
        mDirtyRanges.reserve(32);
    }
    ConversionBuffer(vk::Renderer *renderer,
                     VkBufferUsageFlags usageFlags,
                     size_t initialSize,
                     size_t alignment,
                     bool hostVisible);
    ~ConversionBuffer();

    ConversionBuffer(ConversionBuffer &&other);

    bool dirty() const { return mEntireBufferDirty || !mDirtyRanges.empty(); }
    bool isEntireBufferDirty() const { return mEntireBufferDirty; }
    void setEntireBufferDirty() { mEntireBufferDirty = true; }
    void addDirtyBufferRange(const RangeDeviceSize &range) { mDirtyRanges.emplace_back(range); }
    void consolidateDirtyRanges();
    const std::vector<RangeDeviceSize> &getDirtyBufferRanges() const { return mDirtyRanges; }
    void clearDirty()
    {
        mEntireBufferDirty = false;
        mDirtyRanges.clear();
    }

    bool valid() const { return mData && mData->valid(); }
    vk::BufferHelper *getBuffer() const { return mData.get(); }
    void release(vk::Context *context) { mData->release(context); }
    void destroy(vk::Renderer *renderer) { mData->destroy(renderer); }

  private:
    // Dirty state that determines if we need to re-stream vertex data. mEntireBufferDirty
    // indicates that the entire buffer contents have changed. mDirtyRanges should be ignored when
    // mEntireBufferDirty is true; otherwise it holds the ranges of data that have been modified.
    // Note that there is no guarantee that the ranges do not overlap.
    bool mEntireBufferDirty;
    std::vector<RangeDeviceSize> mDirtyRanges;

    // Where the conversion data is stored.
    std::unique_ptr<vk::BufferHelper> mData;
};
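
// Illustrative sketch of how the dirty tracking above is intended to be driven by a caller; the
// reconversion steps in the comments are hypothetical and not part of this interface:
//
//     ConversionBuffer conversion(renderer, usageFlags, initialSize, alignment, hostVisible);
//     conversion.addDirtyBufferRange(RangeDeviceSize(0, 256));
//     conversion.addDirtyBufferRange(RangeDeviceSize(128, 512));  // ranges may overlap
//     conversion.consolidateDirtyRanges();
//     if (conversion.isEntireBufferDirty())
//     {
//         // Reconvert the whole buffer into conversion.getBuffer().
//     }
//     else
//     {
//         for (const RangeDeviceSize &range : conversion.getDirtyBufferRanges())
//         {
//             // Reconvert only the modified range.
//         }
//     }
//     conversion.clearDirty();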

class VertexConversionBuffer : public ConversionBuffer
{
  public:
    struct CacheKey final
    {
        angle::FormatID formatID;
        GLuint stride;
        size_t offset;
        bool hostVisible;
        bool offsetMustMatchExactly;
    };

    VertexConversionBuffer(vk::Renderer *renderer, const CacheKey &cacheKey);
    ~VertexConversionBuffer();

    VertexConversionBuffer(VertexConversionBuffer &&other);

    bool match(const CacheKey &cacheKey)
    {
        // If anything other than the offset mismatches, the buffer can't be reused.
        if (mCacheKey.formatID != cacheKey.formatID || mCacheKey.stride != cacheKey.stride ||
            mCacheKey.offsetMustMatchExactly != cacheKey.offsetMustMatchExactly ||
            mCacheKey.hostVisible != cacheKey.hostVisible)
        {
            return false;
        }

        // If the offset matches, the buffer can definitely be reused.
        if (mCacheKey.offset == cacheKey.offset)
        {
            return true;
        }

        // If an exact offset match is not required and the offsets are a whole number of strides
        // apart, then we adjust the offset to reuse the buffer. The benefit of reusing the buffer
        // is that the previous conversion result is still valid; we only need to convert the
        // modified data.
        if (!cacheKey.offsetMustMatchExactly)
        {
            int64_t offsetGap = cacheKey.offset - mCacheKey.offset;
            if ((offsetGap % cacheKey.stride) == 0)
            {
                if (cacheKey.offset < mCacheKey.offset)
                {
                    addDirtyBufferRange(RangeDeviceSize(cacheKey.offset, mCacheKey.offset));
                    mCacheKey.offset = cacheKey.offset;
                }
                return true;
            }
        }
        return false;
    }

    const CacheKey &getCacheKey() const { return mCacheKey; }

  private:
    // The conversion is identified by the CacheKey: primarily {format, stride, offset}, plus the
    // hostVisible and offsetMustMatchExactly flags.
    CacheKey mCacheKey;
};
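
// A worked example of the offset-adjustment path in match() above (illustrative numbers): if the
// cached key has offset 64 and stride 16, a request with offsetMustMatchExactly == false and
// offset 16 differs by 48 bytes, a whole number of strides. The buffer is therefore reused: the
// cached offset is lowered to 16 and the bytes from 16 to 64 are marked dirty, so only the newly
// exposed data needs conversion while data previously converted at offset 64 and beyond remains
// valid.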

enum class BufferUpdateType
{
    StorageRedefined,
    ContentsUpdate,
};

struct BufferDataSource
{
    // Buffer data can come from two sources:
    // glBufferData and glBufferSubData upload through a CPU pointer
    const void *data = nullptr;
    // glCopyBufferSubData copies data from another buffer
    vk::BufferHelper *buffer = nullptr;
    VkDeviceSize bufferOffset = 0;
};
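
// Illustrative sketch of the two ways a BufferDataSource is typically filled in; the
// clientPointer, sourceBufferVk, and sourceOffset variables are hypothetical:
//
//     // glBufferSubData-style update from a CPU pointer:
//     BufferDataSource cpuSource;
//     cpuSource.data = clientPointer;
//
//     // glCopyBufferSubData-style update from another buffer:
//     BufferDataSource gpuSource;
//     gpuSource.buffer       = &sourceBufferVk->getBuffer();
//     gpuSource.bufferOffset = static_cast<VkDeviceSize>(sourceOffset);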

VkBufferUsageFlags GetDefaultBufferUsageFlags(vk::Renderer *renderer);

class BufferVk : public BufferImpl
{
  public:
    BufferVk(const gl::BufferState &state);
    ~BufferVk() override;
    void destroy(const gl::Context *context) override;

    angle::Result setExternalBufferData(const gl::Context *context,
                                        gl::BufferBinding target,
                                        GLeglClientBufferEXT clientBuffer,
                                        size_t size,
                                        VkMemoryPropertyFlags memoryPropertyFlags);
    angle::Result setDataWithUsageFlags(const gl::Context *context,
                                        gl::BufferBinding target,
                                        GLeglClientBufferEXT clientBuffer,
                                        const void *data,
                                        size_t size,
                                        gl::BufferUsage usage,
                                        GLbitfield flags,
                                        gl::BufferStorage bufferStorage) override;
    angle::Result setData(const gl::Context *context,
                          gl::BufferBinding target,
                          const void *data,
                          size_t size,
                          gl::BufferUsage usage) override;
    angle::Result setSubData(const gl::Context *context,
                             gl::BufferBinding target,
                             const void *data,
                             size_t size,
                             size_t offset) override;
    angle::Result copySubData(const gl::Context *context,
                              BufferImpl *source,
                              GLintptr sourceOffset,
                              GLintptr destOffset,
                              GLsizeiptr size) override;
    angle::Result map(const gl::Context *context, GLenum access, void **mapPtr) override;
    angle::Result mapRange(const gl::Context *context,
                           size_t offset,
                           size_t length,
                           GLbitfield access,
                           void **mapPtr) override;
    angle::Result unmap(const gl::Context *context, GLboolean *result) override;
    angle::Result getSubData(const gl::Context *context,
                             GLintptr offset,
                             GLsizeiptr size,
                             void *outData) override;

    angle::Result getIndexRange(const gl::Context *context,
                                gl::DrawElementsType type,
                                size_t offset,
                                size_t count,
                                bool primitiveRestartEnabled,
                                gl::IndexRange *outRange) override;

    GLint64 getSize() const { return mState.getSize(); }

    void onDataChanged() override;

    vk::BufferHelper &getBuffer()
    {
        ASSERT(isBufferValid());
        return mBuffer;
    }

    vk::BufferSerial getBufferSerial() { return mBuffer.getBufferSerial(); }

    bool isBufferValid() const { return mBuffer.valid(); }
    bool isCurrentlyInUse(vk::Renderer *renderer) const;

    angle::Result mapImpl(ContextVk *contextVk, GLbitfield access, void **mapPtr);
    angle::Result mapRangeImpl(ContextVk *contextVk,
                               VkDeviceSize offset,
                               VkDeviceSize length,
                               GLbitfield access,
                               void **mapPtr);
    angle::Result unmapImpl(ContextVk *contextVk);
    angle::Result ghostMappedBuffer(ContextVk *contextVk,
                                    VkDeviceSize offset,
                                    VkDeviceSize length,
                                    GLbitfield access,
                                    void **mapPtr);

    VertexConversionBuffer *getVertexConversionBuffer(
        vk::Renderer *renderer,
        const VertexConversionBuffer::CacheKey &cacheKey);

  private:
    angle::Result updateBuffer(ContextVk *contextVk,
                               size_t bufferSize,
                               const BufferDataSource &dataSource,
                               size_t size,
                               size_t offset);
    angle::Result directUpdate(ContextVk *contextVk,
                               const BufferDataSource &dataSource,
                               size_t size,
                               size_t offset);
    angle::Result stagedUpdate(ContextVk *contextVk,
                               const BufferDataSource &dataSource,
                               size_t size,
                               size_t offset);
    angle::Result allocStagingBuffer(ContextVk *contextVk,
                                     vk::MemoryCoherency coherency,
                                     VkDeviceSize size,
                                     uint8_t **mapPtr);
    angle::Result flushStagingBuffer(ContextVk *contextVk, VkDeviceSize offset, VkDeviceSize size);
    angle::Result acquireAndUpdate(ContextVk *contextVk,
                                   size_t bufferSize,
                                   const BufferDataSource &dataSource,
                                   size_t updateSize,
                                   size_t updateOffset,
                                   BufferUpdateType updateType);
    angle::Result setDataWithMemoryType(const gl::Context *context,
                                        gl::BufferBinding target,
                                        const void *data,
                                        size_t size,
                                        VkMemoryPropertyFlags memoryPropertyFlags,
                                        gl::BufferUsage usage);
    angle::Result handleDeviceLocalBufferMap(ContextVk *contextVk,
                                             VkDeviceSize offset,
                                             VkDeviceSize size,
                                             uint8_t **mapPtr);
    angle::Result mapHostVisibleBuffer(ContextVk *contextVk,
                                       VkDeviceSize offset,
                                       GLbitfield access,
                                       uint8_t **mapPtr);
    angle::Result setDataImpl(ContextVk *contextVk,
                              size_t bufferSize,
                              const BufferDataSource &dataSource,
                              size_t updateSize,
                              size_t updateOffset,
                              BufferUpdateType updateType);
    angle::Result release(ContextVk *context);
    void dataUpdated();
    void dataRangeUpdated(const RangeDeviceSize &range);

    angle::Result acquireBufferHelper(ContextVk *contextVk,
                                      size_t sizeInBytes,
                                      BufferUsageType usageType);

    bool isExternalBuffer() const { return mClientBuffer != nullptr; }
    BufferUpdateType calculateBufferUpdateTypeOnFullUpdate(
        vk::Renderer *renderer,
        size_t size,
        VkMemoryPropertyFlags memoryPropertyFlags,
        BufferUsageType usageType,
        const void *data) const;
    bool shouldRedefineStorage(vk::Renderer *renderer,
                               BufferUsageType usageType,
                               VkMemoryPropertyFlags memoryPropertyFlags,
                               size_t size) const;

    void releaseConversionBuffers(vk::Context *context);

    vk::BufferHelper mBuffer;

    // If not null, this is the external memory pointer passed from the client API.
    void *mClientBuffer;

    uint32_t mMemoryTypeIndex;
    // Memory/usage properties that will be used for memory allocation.
    VkMemoryPropertyFlags mMemoryPropertyFlags;

    // The staging buffer to aid map operations. This is used when buffers are not host visible,
    // or as a performance optimization when only a smaller range of the buffer is mapped.
    vk::BufferHelper mStagingBuffer;

    // A cache of converted vertex data.
    std::vector<VertexConversionBuffer> mVertexConversionBuffers;

    // Tracks whether mStagingBuffer has been mapped for the user or not.
    bool mIsStagingBufferMapped;

    // Tracks whether the BufferVk object has valid data or not.
    bool mHasValidData;

    // True if the buffer is currently mapped for CPU write access. If the map call originated
    // from an OpenGL ES API call, then this is consistent with the mState.getAccessFlags() bits.
    // Otherwise the buffer was mapped internally by ANGLE and will not be consistent with the
    // mState access bits, so we have to keep a record of it.
    bool mIsMappedForWrite;
    // True if usage is dynamic. May affect how we allocate memory.
    BufferUsageType mUsageType;
    // Similar to mIsMappedForWrite, this may differ from mState's getMapOffset/getMapLength if
    // the buffer was mapped internally by ANGLE.
    RangeDeviceSize mMappedRange;
};

}  // namespace rx

#endif  // LIBANGLE_RENDERER_VULKAN_BUFFERVK_H_