/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrMtlBuffer.h"
#include "GrMtlGpu.h"
#include "GrGpuResourcePriv.h"
#include "GrTypesPriv.h"

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

sk_sp<GrMtlBuffer> GrMtlBuffer::Make(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
                                     GrAccessPattern accessPattern, const void* data) {
    sk_sp<GrMtlBuffer> buffer(new GrMtlBuffer(gpu, size, intendedType, accessPattern));
    if (data && !buffer->onUpdateData(data, size)) {
        return nullptr;
    }
    return buffer;
}

GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
                         GrAccessPattern accessPattern)
        : INHERITED(gpu, size, intendedType, accessPattern)
        , fIsDynamic(accessPattern == kDynamic_GrAccessPattern) {
    // TODO: We are treating all buffers as static access since we don't have an implementation to
    // synchronize gpu and cpu access of a resource yet. See comments in GrMtlBuffer::internalMap()
    // and internalUnmap() for more details.
    fIsDynamic = false;

    // The managed resource mode is only available for macOS. iOS should use shared.
    fMtlBuffer = size == 0 ? nil :
            [gpu->device() newBufferWithLength: size
                                       options: !fIsDynamic ? MTLResourceStorageModePrivate
#ifdef SK_BUILD_FOR_MAC
                                                            : MTLResourceStorageModeManaged];
#else
                                                            : MTLResourceStorageModeShared];
#endif
    this->registerWithCache(SkBudgeted::kYes);
    VALIDATE();
}

GrMtlBuffer::~GrMtlBuffer() {
    SkASSERT(fMtlBuffer == nil);
    SkASSERT(fMappedBuffer == nil);
    SkASSERT(fMapPtr == nullptr);
}

bool GrMtlBuffer::onUpdateData(const void* src, size_t srcInBytes) {
    if (fMtlBuffer == nil) {
        return false;
    }
    if (srcInBytes > fMtlBuffer.length) {
        return false;
    }
    VALIDATE();

    this->internalMap(srcInBytes);
    if (fMapPtr == nil) {
        return false;
    }
    SkASSERT(fMappedBuffer);
    SkASSERT(srcInBytes == fMappedBuffer.length);
    memcpy(fMapPtr, src, srcInBytes);
    this->internalUnmap(srcInBytes);

    VALIDATE();
    return true;
}

inline GrMtlGpu* GrMtlBuffer::mtlGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrMtlGpu*>(this->getGpu());
}

void GrMtlBuffer::onAbandon() {
    fMtlBuffer = nil;
    fMappedBuffer = nil;
    fMapPtr = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}

void GrMtlBuffer::onRelease() {
    if (!this->wasDestroyed()) {
        VALIDATE();
        fMtlBuffer = nil;
        fMappedBuffer = nil;
        fMapPtr = nullptr;
        VALIDATE();
    }
    INHERITED::onRelease();
}

void GrMtlBuffer::internalMap(size_t sizeInBytes) {
    SkASSERT(fMtlBuffer);
    if (this->wasDestroyed()) {
        return;
    }
    VALIDATE();
    SkASSERT(!this->isMapped());
    if (fIsDynamic) {
        // TODO: We will want to decide if we need to create a new buffer here in order to avoid
        // possibly invalidating a buffer which is being used by the gpu.
        fMappedBuffer = fMtlBuffer;
        fMapPtr = fMappedBuffer.contents;
    } else {
        SK_BEGIN_AUTORELEASE_BLOCK
        // TODO: We can't ensure that map will only be called once on static access buffers until
        // we actually enable dynamic access.
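        // For static (non-dynamic) buffers, mapping is implemented with a temporary CPU-visible
        // staging buffer allocated here; its contents are copied into the private fMtlBuffer by
        // a blit when internalUnmap() is called.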
        // SkASSERT(fMappedBuffer == nil);
        fMappedBuffer =
                [this->mtlGpu()->device() newBufferWithLength: sizeInBytes
#ifdef SK_BUILD_FOR_MAC
                                                      options: MTLResourceStorageModeManaged];
#else
                                                      options: MTLResourceStorageModeShared];
#endif
        fMapPtr = fMappedBuffer.contents;
        SK_END_AUTORELEASE_BLOCK
    }
    VALIDATE();
}

void GrMtlBuffer::internalUnmap(size_t sizeInBytes) {
    SkASSERT(fMtlBuffer);
    if (this->wasDestroyed()) {
        return;
    }
    VALIDATE();
    SkASSERT(this->isMapped());
    if (fMtlBuffer == nil) {
        fMappedBuffer = nil;
        fMapPtr = nullptr;
        return;
    }
#ifdef SK_BUILD_FOR_MAC
    // TODO: by calling didModifyRange here we invalidate the buffer. This will cause problems for
    // dynamic access buffers if they are being used by the gpu.
    [fMappedBuffer didModifyRange: NSMakeRange(0, sizeInBytes)];
#endif
    if (!fIsDynamic) {
        SK_BEGIN_AUTORELEASE_BLOCK
        id<MTLBlitCommandEncoder> blitCmdEncoder =
                [this->mtlGpu()->commandBuffer() blitCommandEncoder];
        [blitCmdEncoder copyFromBuffer: fMappedBuffer
                          sourceOffset: 0
                              toBuffer: fMtlBuffer
                     destinationOffset: 0
                                  size: sizeInBytes];
        [blitCmdEncoder endEncoding];
        SK_END_AUTORELEASE_BLOCK
    }
    fMappedBuffer = nil;
    fMapPtr = nullptr;
}

void GrMtlBuffer::onMap() {
    this->internalMap(fMtlBuffer.length);
}

void GrMtlBuffer::onUnmap() {
    this->internalUnmap(fMappedBuffer.length);
}

#ifdef SK_DEBUG
void GrMtlBuffer::validate() const {
    SkASSERT(fMtlBuffer == nil ||
             this->intendedType() == GrGpuBufferType::kVertex ||
             this->intendedType() == GrGpuBufferType::kIndex ||
             this->intendedType() == GrGpuBufferType::kXferCpuToGpu ||
             this->intendedType() == GrGpuBufferType::kXferGpuToCpu);
    SkASSERT(fMappedBuffer == nil || fMtlBuffer == nil ||
             fMappedBuffer.length <= fMtlBuffer.length);
    SkASSERT(fIsDynamic == false);  // TODO: implement synchronization to allow dynamic access.
}
#endif