/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/mtl/GrMtlGpu.h"

#include "include/core/SkColorSpace.h"
#include "include/gpu/GpuTypes.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/base/SkMathPriv.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrDataUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
#include "src/gpu/ganesh/mtl/GrMtlBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlOpsRenderPass.h"
#include "src/gpu/ganesh/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/ganesh/mtl/GrMtlRenderCommandEncoder.h"
#include "src/gpu/ganesh/mtl/GrMtlSemaphore.h"
#include "src/gpu/ganesh/mtl/GrMtlTexture.h"
#include "src/gpu/ganesh/mtl/GrMtlTextureRenderTarget.h"
#include "src/gpu/ganesh/mtl/GrMtlUtil.h"

#import <simd/simd.h>

#if !__has_feature(objc_arc)
#error This file must be compiled with ARC. Use the -fobjc-arc flag.
#endif

GR_NORETAIN_BEGIN
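// Editor's note: GR_NORETAIN marks an Objective-C local as __unsafe_unretained so ARC elides the
// retain/release pair on each assignment. This is safe below because every such local aliases a
// reference that an owning object already holds (see the macro definitions alongside the other
// Metal utilities).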

#if GR_TEST_UTILS
// set to 1 if you want to do GPU capture of each commandBuffer
#define GR_METAL_CAPTURE_COMMANDBUFFER 0
#endif

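// Editor's sketch of typical client wiring (not part of the original file): a client wraps an
// existing MTLDevice and MTLCommandQueue in a GrMtlBackendContext and calls
// GrDirectContext::MakeMetal(), which lands here. `device` and `queue` below are assumed to be
// objects the client already created.
//
//     GrMtlBackendContext backendContext = {};
//     backendContext.fDevice.retain((__bridge GrMTLHandle)device);  // id<MTLDevice>
//     backendContext.fQueue.retain((__bridge GrMTLHandle)queue);    // id<MTLCommandQueue>
//     sk_sp<GrDirectContext> ctx =
//             GrDirectContext::MakeMetal(backendContext, GrContextOptions());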
sk_sp<GrGpu> GrMtlGpu::Make(const GrMtlBackendContext& context, const GrContextOptions& options,
                            GrDirectContext* direct) {
    if (!context.fDevice || !context.fQueue) {
        return nullptr;
    }
    if (@available(macOS 10.14, iOS 10.0, *)) {
        // no warning needed
    } else {
        SkDebugf("*** Error ***: Skia's Metal backend no longer supports this OS version.\n");
#ifdef SK_BUILD_FOR_IOS
        SkDebugf("Minimum supported version is iOS 10.0.\n");
#else
        SkDebugf("Minimum supported version is macOS 10.14.\n");
#endif
        return nullptr;
    }

    id<MTLDevice> GR_NORETAIN device = (__bridge id<MTLDevice>)(context.fDevice.get());
    id<MTLCommandQueue> GR_NORETAIN queue = (__bridge id<MTLCommandQueue>)(context.fQueue.get());

    return sk_sp<GrGpu>(new GrMtlGpu(direct, options, device, queue, context.fBinaryArchive.get()));
}

// This constant determines how many OutstandingCommandBuffers are allocated together as a block in
// the deque. As such it needs to balance allocating too much memory vs. incurring
// allocation/deallocation thrashing. It should roughly correspond to the max number of outstanding
// command buffers we expect to see.
static const int kDefaultOutstandingAllocCnt = 8;

GrMtlGpu::GrMtlGpu(GrDirectContext* direct, const GrContextOptions& options,
                   id<MTLDevice> device, id<MTLCommandQueue> queue, GrMTLHandle binaryArchive)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(queue)
        , fOutstandingCommandBuffers(sizeof(OutstandingCommandBuffer), kDefaultOutstandingAllocCnt)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fUniformsRingBuffer(this, 128 * 1024, 256, GrGpuBufferType::kUniform)
        , fDisconnected(false) {
    fMtlCaps.reset(new GrMtlCaps(options, fDevice));
    this->initCapsAndCompiler(fMtlCaps);
#if GR_METAL_CAPTURE_COMMANDBUFFER
    this->testingOnly_startCapture();
#endif
    fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
#if GR_METAL_SDK_VERSION >= 230
    if (@available(macOS 11.0, iOS 14.0, *)) {
        fBinaryArchive = (__bridge id<MTLBinaryArchive>)(binaryArchive);
    }
#endif
}

GrMtlGpu::~GrMtlGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
}

void GrMtlGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);

    if (!fDisconnected) {
        this->destroyResources();
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrMtlGpu::pipelineBuilder() {
    return nullptr;
}

sk_sp<GrThreadSafePipelineBuilder> GrMtlGpu::refPipelineBuilder() {
    return nullptr;
}

void GrMtlGpu::destroyResources() {
    this->submitCommandBuffer(SyncQueue::kForce_SyncQueue);
    // if there's no work we won't release the command buffer, so we do it here
    fCurrentCmdBuffer = nil;

    // We used a placement new for each object in fOutstandingCommandBuffers, so we're responsible
    // for calling the destructor on each of them as well.
    while (!fOutstandingCommandBuffers.empty()) {
        OutstandingCommandBuffer* buffer =
                (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
        // make sure we remove before deleting as deletion might try to kick off another submit
        fOutstandingCommandBuffers.pop_front();
        buffer->~OutstandingCommandBuffer();
    }

    fStagingBufferManager.reset();

    fResourceProvider.destroyResources();

    fQueue = nil;
    fDevice = nil;
}

GrOpsRenderPass* GrMtlGpu::onGetOpsRenderPass(
            GrRenderTarget* renderTarget, bool useMSAASurface, GrAttachment* stencil,
            GrSurfaceOrigin origin, const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) {
    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use.
    GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(renderTarget);

    // TODO: support DMSAA
    SkASSERT(!useMSAASurface || renderTarget->numSamples() > 1);

    bool withResolve = false;

    // Figure out if we can use a Resolve store action for this render pass. When we set up
    // the render pass we'll update the color load/store ops since we don't want to ever load
    // or store the msaa color attachment, but may need to for the resolve attachment.
    if (useMSAASurface && this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
        withResolve = true;
    }

    sk_sp<GrMtlFramebuffer> framebuffer =
            sk_ref_sp(mtlRT->getFramebuffer(withResolve, SkToBool(stencil)));
    if (!framebuffer) {
        return nullptr;
    }

    return new GrMtlOpsRenderPass(this, renderTarget, std::move(framebuffer), origin, colorInfo,
                                  stencilInfo);
}

GrMtlCommandBuffer* GrMtlGpu::commandBuffer() {
    if (!fCurrentCmdBuffer) {
#if GR_METAL_CAPTURE_COMMANDBUFFER
        this->testingOnly_startCapture();
#endif
        // Create a new command buffer for the next submit
        fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
    }

    SkASSERT(fCurrentCmdBuffer);
    return fCurrentCmdBuffer.get();
}

void GrMtlGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    SkASSERT(buffer);
    this->commandBuffer()->addGrBuffer(std::move(buffer));
}

void GrMtlGpu::submit(GrOpsRenderPass* renderPass) {
    GrMtlOpsRenderPass* mtlRenderPass = reinterpret_cast<GrMtlOpsRenderPass*>(renderPass);
    mtlRenderPass->submit();
    delete renderPass;
}

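// Editor's note on the SyncQueue contract (inherited from GrGpu): kForce_SyncQueue commits the
// current command buffer and blocks until the GPU has finished all outstanding work, while
// kSkip_SyncQueue just commits and returns.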
bool GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
    if (!fCurrentCmdBuffer || !fCurrentCmdBuffer->hasWork()) {
        if (sync == SyncQueue::kForce_SyncQueue) {
            this->finishOutstandingGpuWork();
            this->checkForFinishedCommandBuffers();
        }
        // We need to manually call the finishedCallbacks since we don't add this
        // to the OutstandingCommandBuffer list
        if (fCurrentCmdBuffer) {
            fCurrentCmdBuffer->callFinishedCallbacks();
        }
        return true;
    }

    SkASSERT(fCurrentCmdBuffer);
    bool didCommit = fCurrentCmdBuffer->commit(sync == SyncQueue::kForce_SyncQueue);
    if (didCommit) {
        new (fOutstandingCommandBuffers.push_back()) OutstandingCommandBuffer(fCurrentCmdBuffer);
    }

    // We don't create a new command buffer here because we may end up using it
    // in the next frame, and that confuses the GPU debugger. Instead we create
    // one when we next need it.
    fCurrentCmdBuffer.reset();

    // If the freeing of any resources held by a finished command buffer causes us to send
    // a new command to the gpu we'll create the new command buffer in commandBuffer(), above.
    this->checkForFinishedCommandBuffers();

#if GR_METAL_CAPTURE_COMMANDBUFFER
    this->testingOnly_stopCapture();
#endif
    return didCommit;
}

void GrMtlGpu::checkForFinishedCommandBuffers() {
    // Iterate over all the outstanding command buffers to see if any have finished. The command
    // buffers are in order from oldest to newest, so we start at the front to check if their fence
    // has signaled. If so we pop it off and move onto the next.
    // Repeat till we find a command list that has not finished yet (and all others afterwards are
    // also guaranteed to not have finished).
    OutstandingCommandBuffer* front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
    while (front && (*front)->isCompleted()) {
        // Make sure we remove before deleting as deletion might try to kick off another submit
        fOutstandingCommandBuffers.pop_front();
        // Since we used placement new we are responsible for calling the destructor manually.
        front->~OutstandingCommandBuffer();
        front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
    }
}

void GrMtlGpu::finishOutstandingGpuWork() {
    // wait for the last command buffer we've submitted to finish
    OutstandingCommandBuffer* back =
            (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (back) {
        (*back)->waitUntilCompleted();
    }
}

void GrMtlGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                               GrGpuFinishedContext finishedContext) {
    SkASSERT(finishedProc);
    this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
}

void GrMtlGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    // Besides the current command buffer, we also add the finishedCallback to the newest
    // outstanding command buffer. Our contract for calling the proc is that all previously
    // submitted command buffers have finished when we call it. However, if our current command
    // buffer has no work when it is flushed it will drop its ref to the callback immediately. But
    // the previous work may not have finished. It is safe to only add the proc to the newest
    // outstanding command buffer because that one must finish after all previously submitted
    // command buffers.
    OutstandingCommandBuffer* back = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (back) {
        (*back)->addFinishedCallback(finishedCallback);
    }
    commandBuffer()->addFinishedCallback(std::move(finishedCallback));
}

bool GrMtlGpu::onSubmitToGpu(bool syncCpu) {
    if (syncCpu) {
        return this->submitCommandBuffer(kForce_SyncQueue);
    } else {
        return this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

std::unique_ptr<GrSemaphore> GrMtlGpu::prepareTextureForCrossContextUsage(GrTexture*) {
    this->submitToGpu(false);
    return nullptr;
}

sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size,
                                            GrGpuBufferType type,
                                            GrAccessPattern accessPattern) {
    return GrMtlBuffer::Make(this, size, type, accessPattern);
}

static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels > 32767) {
        SkASSERT(false); // surfaces should not be this wide anyway
        return false;
    }
    return true;
}

bool GrMtlGpu::uploadToTexture(GrMtlTexture* tex,
                               SkIRect rect,
                               GrColorType dataColorType,
                               const GrMipLevel texels[],
                               int mipLevelCount) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(tex->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (tex->maxMipmapLevel() + 1));

    if (!check_max_blit_width(rect.width())) {
        return false;
    }
    if (rect.isEmpty()) {
        return false;
    }

    SkASSERT(this->mtlCaps().surfaceSupportsWritePixels(tex));
    SkASSERT(this->mtlCaps().areColorTypeAndFormatCompatible(dataColorType, tex->backendFormat()));

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    SkASSERT(1 == mipLevelCount || mipLevelCount == (int)mtlTexture.mipmapLevelCount);

    if (mipLevelCount == 1 && !texels[0].fPixels) {
        return true;   // no data to upload
    }

    for (int i = 0; i < mipLevelCount; ++i) {
        // We do not allow any gaps in the mip data
        if (!texels[i].fPixels) {
            return false;
        }
    }

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                                 rect.size(),
                                                                 &individualMipOffsets,
                                                                 mipLevelCount);
    SkASSERT(combinedBufferSize);
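    // Editor's worked example: for a 100x100 upload with bpp = 4 and mipLevelCount = 2, the tight
    // level sizes are 100*100*4 = 40000 and 50*50*4 = 10000 bytes, so individualMipOffsets is
    // roughly {0, 40000} and combinedBufferSize about 50000, modulo any per-level alignment
    // padding applied by GrComputeTightCombinedBufferSize.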

    // offset value must be a multiple of the destination texture's pixel size in bytes
    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    SkDEBUGCODE(int layerHeight = tex->height());
    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"uploadToTexture"];
#endif
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texels[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texels[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping any trailing bytes
            char* dst = bufferData + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texels[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                              sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: trimRowBytes
                       sourceBytesPerImage: trimRowBytes*currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
        SkDEBUGCODE(layerHeight = currentHeight);
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}

bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));

    if (!levelMask) {
        return true;
    }

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // We may clear any subset of the texture's miplevels, as indicated by levelMask
    int mipLevelCount = (int)mtlTexture.mipmapLevelCount;

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = 0;
    int currentWidth = tex->width();
    int currentHeight = tex->height();

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This relies on the assumption that the bytes per pixel is always a power of 2.
    // TODO: can we just copy from a single buffer the size of the largest cleared level w/o a perf
    // penalty?
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
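    // Editor's worked example: with bpp = 4 the mask is 0x3 | 0x3 = 0x3, so a running
    // combinedBufferSize of 10 has alignmentDiff = 2 and gets bumped by 0x3 - 2 + 1 = 2,
    // landing on the 4-byte boundary 12.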
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    SkASSERT(combinedBufferSize > 0 && !individualMipOffsets.empty());

    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
    id<MTLBuffer> transferBuffer = mtlBuffer->mtlBuffer();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"clearTexture"];
#endif
    // clear the buffer to transparent black
    NSRange clearRange;
    clearRange.location = 0;
    clearRange.length = combinedBufferSize;
    [blitCmdEncoder fillBuffer: transferBuffer
                         range: clearRange
                         value: 0];

    // now copy buffer to texture
    currentWidth = tex->width();
    currentHeight = tex->height();
    MTLOrigin origin = MTLOriginMake(0, 0, 0);
    // individualMipOffsets was filled compactly (one entry per level set in levelMask), so track
    // its index separately from the mip level number.
    int mipOffsetIndex = 0;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t rowBytes = currentWidth * bpp;

            [blitCmdEncoder copyFromBuffer: transferBuffer
                              sourceOffset: individualMipOffsets[mipOffsetIndex++]
                         sourceBytesPerRow: rowBytes
                       sourceBytesPerImage: rowBytes * currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    // Don't need didModifyRange: here because fillBuffer: happens on the GPU
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}

sk_sp<GrAttachment> GrMtlGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                    SkISize dimensions, int numStencilSamples) {
    MTLPixelFormat sFmt = this->mtlCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrMtlAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrMtlGpu::makeMSAAAttachment(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 int numSamples,
                                                 GrProtected isProtected,
                                                 GrMemoryless isMemoryless) {
    // Metal doesn't support protected textures
    SkASSERT(isProtected == GrProtected::kNo);
    // TODO: add memoryless support
    SkASSERT(isMemoryless == GrMemoryless::kNo);

    MTLPixelFormat pixelFormat = (MTLPixelFormat) format.asMtlFormat();
    SkASSERT(pixelFormat != MTLPixelFormatInvalid);
    SkASSERT(!GrMtlFormatIsCompressed(pixelFormat));
    SkASSERT(this->mtlCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrMtlAttachment::MakeMSAA(this, dimensions, numSamples, pixelFormat);
}

sk_sp<GrTexture> GrMtlGpu::onCreateTexture(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           GrRenderable renderable,
                                           int renderTargetSampleCnt,
                                           skgpu::Budgeted budgeted,
                                           GrProtected isProtected,
                                           int mipLevelCount,
                                           uint32_t levelClearMask,
                                           std::string_view label) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }
    SkASSERT(mipLevelCount > 0);

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(mtlPixelFormat != MTLPixelFormatInvalid);
    SkASSERT(!this->caps()->isFormatCompressed(format));

    sk_sp<GrMtlTexture> tex;
    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
    if (renderable == GrRenderable::kYes) {
        tex = GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, renderTargetSampleCnt, mtlPixelFormat, mipLevelCount,
                mipmapStatus, label);
    } else {
        tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                           mipLevelCount, mipmapStatus, label);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        this->clearTexture(tex.get(), GrMtlFormatBytesPerBlock(mtlPixelFormat), levelClearMask);
    }

    return std::move(tex);
}

sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(SkISize dimensions,
                                                     const GrBackendFormat& format,
                                                     skgpu::Budgeted budgeted,
                                                     GrMipmapped mipmapped,
                                                     GrProtected isProtected,
                                                     const void* data, size_t dataSize) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }

    SkASSERT(this->caps()->isFormatTexturable(format, GrTextureType::k2D));
    SkASSERT(data);

    if (!check_max_blit_width(dimensions.width())) {
        return nullptr;
    }

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(this->caps()->isFormatCompressed(format));

    int numMipLevels = 1;
    if (mipmapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    GrMipmapStatus mipmapStatus = (mipmapped == GrMipmapped::kYes)
                                          ? GrMipmapStatus::kValid
                                          : GrMipmapStatus::kNotAllocated;

    auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                            numMipLevels, mipmapStatus,
                                            /*label=*/"MtlGpu_CreateCompressedTexture");
    if (!tex) {
        return nullptr;
    }

    // Upload to texture
    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);

    auto compressionType = GrBackendFormatToCompressionType(format);
    SkASSERT(compressionType != SkImage::CompressionType::kNone);

    SkTArray<size_t> individualMipOffsets(numMipLevels);
    SkDEBUGCODE(size_t combinedBufferSize =) SkCompressedDataSize(compressionType, dimensions,
                                                                  &individualMipOffsets,
                                                                  mipmapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets.size() == numMipLevels);
    SkASSERT(dataSize == combinedBufferSize);

    // offset value must be a multiple of the destination texture's pixel size in bytes
    // for compressed textures, this is the block size
    size_t alignment = SkCompressedBlockSize(compressionType);
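    // Editor's note: the 4x4 block formats Ganesh uses here (e.g. ETC2 RGB8 and BC1) have 8-byte
    // blocks, so a 16-pixel-wide level spans 4 blocks, i.e. levelRowBytes = 4 * 8 = 32.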
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            dataSize, alignment);
    if (!slice.fBuffer) {
        return nullptr;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return nullptr;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onCreateCompressedTexture"];
#endif

    // copy data into the buffer, skipping any trailing bytes
    memcpy(bufferData, data, dataSize);

    SkISize levelDimensions = dimensions;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        const size_t levelRowBytes = GrCompressedRowBytes(compressionType, levelDimensions.width());
        size_t levelSize = SkCompressedDataSize(compressionType, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(), 1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() /2),
                           std::max(1, levelDimensions.height()/2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, dataSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return std::move(tex);
}

// TODO: Extra retain/release can't be avoided here because getMtlTextureInfo() copies the
// sk_cfp. It would be useful to have a (possibly-internal-only?) API to get the raw pointer.
static id<MTLTexture> get_texture_from_backend(const GrBackendTexture& backendTex) {
    GrMtlTextureInfo textureInfo;
    if (!backendTex.getMtlTextureInfo(&textureInfo)) {
        return nil;
    }
    return GrGetMTLTexture(textureInfo.fTexture.get());
}

static id<MTLTexture> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
    GrMtlTextureInfo textureInfo;
    if (!backendRT.getMtlTextureInfo(&textureInfo)) {
        return nil;
    }
    return GrGetMTLTexture(textureInfo.fTexture.get());
}

sk_sp<GrTexture> GrMtlGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                                GrWrapOwnership,
                                                GrWrapCacheable cacheable,
                                                GrIOType ioType) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            ioType);
}

sk_sp<GrTexture> GrMtlGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            kRead_GrIOType);
}

sk_sp<GrTexture> GrMtlGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                          int sampleCnt,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    const GrMtlCaps& caps = this->mtlCaps();

    MTLPixelFormat format = mtlTexture.pixelFormat;
    if (!caps.isFormatRenderable(format, sampleCnt)) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, format);
    SkASSERT(sampleCnt);

    return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, backendTex.dimensions(), sampleCnt, mtlTexture, cacheable);
}

sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    if (!this->caps()->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    id<MTLTexture> mtlTexture = get_texture_from_backend(backendRT);
    if (!mtlTexture) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendRT.dimensions(),
                                                      backendRT.sampleCnt(), mtlTexture);
}

bool GrMtlGpu::onRegenerateMipMapLevels(GrTexture* texture) {
    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();

    // Automatic mipmap generation is only supported by color-renderable formats
    if (!fMtlCaps->isFormatRenderable(mtlTexture.pixelFormat, 1) &&
        // We have pixel configs marked as textureable-only that use RGBA8 as the internal format
        MTLPixelFormatRGBA8Unorm != mtlTexture.pixelFormat) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
    [blitCmdEncoder generateMipmapsForTexture: mtlTexture];
    this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(grMtlTexture->attachment()));

    return true;
}

// Used to "clear" a backend texture to a constant color by transferring.
static GrColorType mtl_format_to_backend_tex_clear_colortype(MTLPixelFormat format) {
    switch(format) {
        case MTLPixelFormatA8Unorm:         return GrColorType::kAlpha_8;
        case MTLPixelFormatR8Unorm:         return GrColorType::kR_8;
        case MTLPixelFormatB5G6R5Unorm:     return GrColorType::kBGR_565;
        case MTLPixelFormatABGR4Unorm:      return GrColorType::kABGR_4444;
        case MTLPixelFormatRGBA8Unorm:      return GrColorType::kRGBA_8888;
        case MTLPixelFormatRGBA8Unorm_sRGB: return GrColorType::kRGBA_8888_SRGB;

        case MTLPixelFormatRG8Unorm:        return GrColorType::kRG_88;
        case MTLPixelFormatBGRA8Unorm:      return GrColorType::kBGRA_8888;
        case MTLPixelFormatRGB10A2Unorm:    return GrColorType::kRGBA_1010102;
        case MTLPixelFormatBGR10A2Unorm:    return GrColorType::kBGRA_1010102;
        case MTLPixelFormatR16Float:        return GrColorType::kR_F16;
        case MTLPixelFormatRGBA16Float:     return GrColorType::kRGBA_F16;
        case MTLPixelFormatR16Unorm:        return GrColorType::kR_16;
        case MTLPixelFormatRG16Unorm:       return GrColorType::kRG_1616;
        case MTLPixelFormatRGBA16Unorm:     return GrColorType::kRGBA_16161616;
        case MTLPixelFormatRG16Float:       return GrColorType::kRG_F16;
        default:                            return GrColorType::kUnknown;
    }

    SkUNREACHABLE;
}

static void copy_src_data(char* dst,
                          size_t bytesPerPixel,
                          const SkTArray<size_t>& individualMipOffsets,
                          const GrPixmap srcData[],
                          int numMipLevels,
                          size_t bufferSize) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(individualMipOffsets.size() == numMipLevels);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].width() * bytesPerPixel;
        SkASSERT(individualMipOffsets[level] + trimRB * srcData[level].height() <= bufferSize);
        SkRectMemcpy(dst + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
}

bool GrMtlGpu::createMtlTextureForBackendSurface(MTLPixelFormat mtlFormat,
                                                 SkISize dimensions,
                                                 int sampleCnt,
                                                 GrTexturable texturable,
                                                 GrRenderable renderable,
                                                 GrMipmapped mipmapped,
                                                 GrMtlTextureInfo* info) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (texturable == GrTexturable::kYes && !fMtlCaps->isFormatTexturable(mtlFormat)) {
        return false;
    }
    if (renderable == GrRenderable::kYes && !fMtlCaps->isFormatRenderable(mtlFormat, 1)) {
        return false;
    }

    if (!check_max_blit_width(dimensions.width())) {
        return false;
    }

    auto desc = [[MTLTextureDescriptor alloc] init];
    desc.pixelFormat = mtlFormat;
    desc.width = dimensions.width();
    desc.height = dimensions.height();
    if (mipmapped == GrMipmapped::kYes) {
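        // Editor's worked example: SkPrevLog2 is floor(log2(n)), so a 100x50 texture gets
        // 1 + floor(log2(100)) = 7 levels: 100, 50, 25, 12, 6, 3, 1.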
        desc.mipmapLevelCount = 1 + SkPrevLog2(std::max(dimensions.width(), dimensions.height()));
    }
    if (@available(macOS 10.11, iOS 9.0, *)) {
        desc.storageMode = MTLStorageModePrivate;
        MTLTextureUsage usage = texturable == GrTexturable::kYes ? MTLTextureUsageShaderRead : 0;
        usage |= renderable == GrRenderable::kYes ? MTLTextureUsageRenderTarget : 0;
        desc.usage = usage;
    }
    if (sampleCnt != 1) {
        desc.sampleCount = sampleCnt;
        desc.textureType = MTLTextureType2DMultisample;
    }
    id<MTLTexture> testTexture = [fDevice newTextureWithDescriptor: desc];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    testTexture.label = @"testTexture";
#endif
    info->fTexture.reset(GrRetainPtrFromId(testTexture));
    return true;
}

GrBackendTexture GrMtlGpu::onCreateBackendTexture(SkISize dimensions,
                                                  const GrBackendFormat& format,
                                                  GrRenderable renderable,
                                                  GrMipmapped mipmapped,
                                                  GrProtected isProtected,
                                                  std::string_view label) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
                                                 renderable, mipmapped, &info)) {
        return {};
    }

    GrBackendTexture backendTex(dimensions.width(), dimensions.height(), mipmapped, info);
    return backendTex;
}

bool GrMtlGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                     std::array<float, 4> color) {
    GrMtlTextureInfo info;
    SkAssertResult(backendTexture.getMtlTextureInfo(&info));

    id<MTLTexture> GR_NORETAIN mtlTexture = GrGetMTLTexture(info.fTexture.get());

    const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;

    // Create a transfer buffer and fill with data.
    size_t bytesPerPixel = GrMtlFormatBytesPerBlock(mtlFormat);

    // Reuse the same buffer for all levels. Should be ok since we made the row bytes tight.
    size_t combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();
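    // Editor's note: one base-level-sized region can feed every mip level because the buffer holds
    // a single solid color; reading it back at a smaller level's row pitch still yields rows of
    // that same color.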

    size_t alignment = std::max(bytesPerPixel, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    auto colorType = mtl_format_to_backend_tex_clear_colortype(mtlFormat);
    if (colorType == GrColorType::kUnknown) {
        return false;
    }
    GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
    auto rb = ii.minRowBytes();
    SkASSERT(rb == bytesPerPixel*backendTexture.width());
    if (!GrClearImage(ii, buffer, rb, color)) {
        return false;
    }

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onClearBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    int numMipLevels = (int)mtlTexture.mipmapLevelCount;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes = levelDimensions.width() * bytesPerPixel;
        size_t levelSize = levelRowBytes * levelDimensions.height();

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}

GrBackendTexture GrMtlGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipmapped,
        GrProtected isProtected) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
                                                 GrRenderable::kNo, mipmapped, &info)) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), mipmapped, info);
}

bool GrMtlGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                const void* data,
                                                size_t size) {
    GrMtlTextureInfo info;
    SkAssertResult(backendTexture.getMtlTextureInfo(&info));

    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());

    int numMipLevels = (int)mtlTexture.mipmapLevelCount;
    GrMipmapped mipmapped = numMipLevels > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;

    SkImage::CompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
    SkASSERT(compression != SkImage::CompressionType::kNone);

    // Create a transfer buffer and fill with data.
    SkSTArray<16, size_t> individualMipOffsets;
    size_t combinedBufferSize = SkCompressedDataSize(compression,
                                                     backendTexture.dimensions(),
                                                     &individualMipOffsets,
                                                     mipmapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets.size() == numMipLevels);

    size_t alignment = std::max(SkCompressedBlockSize(compression),
                                this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    memcpy(buffer, data, size);

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onUpdateCompressedBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes = GrCompressedRowBytes(compression, levelDimensions.width());
        size_t levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange:NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}

void GrMtlGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(GrBackendApi::kMetal == tex.backend());
    // Nothing to do here, will get cleaned up when the GrBackendTexture object goes away
}

bool GrMtlGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;

    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
                                 desc, programInfo, &stat);
    if (!pipelineState) {
        return false;
    }

    return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
}

bool GrMtlGpu::precompileShader(const SkData& key, const SkData& data) {
    return this->resourceProvider().precompileShader(key, data);
}

#if GR_TEST_UTILS
bool GrMtlGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kMetal == tex.backend());

    GrMtlTextureInfo info;
    if (!tex.getMtlTextureInfo(&info)) {
        return false;
    }
    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
    if (!mtlTexture) {
        return false;
    }
    if (@available(macOS 10.11, iOS 9.0, *)) {
        return mtlTexture.usage & MTLTextureUsageShaderRead;
    } else {
        return true; // best we can do
    }
}

GrBackendRenderTarget GrMtlGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                     GrColorType ct,
                                                                     int sampleCnt,
                                                                     GrProtected isProtected) {
    if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }
    if (isProtected == GrProtected::kYes) {
        return {};
    }

    MTLPixelFormat format = this->mtlCaps().getFormatFromColorType(ct);
    sampleCnt = this->mtlCaps().getRenderTargetSampleCount(sampleCnt, format);
    if (sampleCnt == 0) {
        return {};
    }

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(format, dimensions, sampleCnt, GrTexturable::kNo,
                                                 GrRenderable::kYes, GrMipmapped::kNo, &info)) {
        return {};
    }

    return GrBackendRenderTarget(dimensions.width(), dimensions.height(), info);
}

void GrMtlGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(GrBackendApi::kMetal == rt.backend());

    GrMtlTextureInfo info;
    if (rt.getMtlTextureInfo(&info)) {
        this->submitToGpu(true);
        // Nothing else to do here, will get cleaned up when the GrBackendRenderTarget
        // is deleted.
    }
}
#endif // GR_TEST_UTILS

void GrMtlGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src) {
    // TODO: Add support for subrectangles
    GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrMtlAttachment* dstAttachment;
    if (dstRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
        dstAttachment = mtlRT->colorAttachment();
    } else {
        SkASSERT(dst->asTexture());
        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
    }

    this->resolve(dstAttachment, srcRT->colorAttachment());
}

void GrMtlGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src,
                                 GrMtlAttachment* dstAttachment, GrMtlAttachment* srcAttachment,
                                 const SkIRect& srcRect, const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    SkASSERT(this->mtlCaps().canCopyAsBlit(dstAttachment->mtlFormat(), dstAttachment->numSamples(),
                                           srcAttachment->mtlFormat(), srcAttachment->numSamples(),
                                           srcRect, dstPoint, dst == src));
#endif
    id<MTLTexture> GR_NORETAIN dstTex = dstAttachment->mtlTexture();
    id<MTLTexture> GR_NORETAIN srcTex = srcAttachment->mtlTexture();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"copySurfaceAsBlit"];
#endif
    [blitCmdEncoder copyFromTexture: srcTex
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
                         sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
                          toTexture: dstTex
                   destinationSlice: 0
                   destinationLevel: 0
                  destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(src));
}

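// Editor's summary of the copy strategy below: try a resolve first (an MSAA source resolved into
// the destination), then fall back to a texture-to-texture blit; return false if neither is legal
// so the caller can fall back to a draw.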
1246bool GrMtlGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
1247                             GrSurface* src, const SkIRect& srcRect,
1248                             GrSamplerState::Filter) {
1249    SkASSERT(!src->isProtected() && !dst->isProtected());
1250
1251    if (srcRect.size() != dstRect.size()) {
1252        return false;
1253    }
1254
1255    GrMtlAttachment* dstAttachment;
1256    GrMtlAttachment* srcAttachment;
1257    GrRenderTarget* dstRT = dst->asRenderTarget();
1258    if (dstRT) {
1259        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
1260        // This will technically return true for single sample rts that used DMSAA in which case we
1261        // don't have to pick the resolve attachment. But in that case the resolve and color
1262        // attachments will be the same anyways.
1263        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1264            dstAttachment = mtlRT->resolveAttachment();
1265        } else {
1266            dstAttachment = mtlRT->colorAttachment();
1267        }
1268    } else if (dst->asTexture()) {
1269        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
1270    } else {
1271        // The surface in a GrAttachment already
1272        dstAttachment = static_cast<GrMtlAttachment*>(dst);
1273    }
1274    GrRenderTarget* srcRT = src->asRenderTarget();
1275    if (srcRT) {
1276        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(srcRT);
1277        // This will technically return true for single sample rts that used DMSAA in which case we
1278        // don't have to pick the resolve attachment. But in that case the resolve and color
1279        // attachments will be the same anyways.
1280        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1281            srcAttachment = mtlRT->resolveAttachment();
1282        } else {
1283            srcAttachment = mtlRT->colorAttachment();
1284        }
1285    } else if (src->asTexture()) {
1286        SkASSERT(src->asTexture());
1287        srcAttachment = static_cast<GrMtlTexture*>(src->asTexture())->attachment();
1288    } else {
1289        // The surface in a GrAttachment already
1290        srcAttachment = static_cast<GrMtlAttachment*>(src);
1291    }
1292
1293    MTLPixelFormat dstFormat = dstAttachment->mtlFormat();
1294    MTLPixelFormat srcFormat = srcAttachment->mtlFormat();
1295
1296    int dstSampleCnt = dstAttachment->sampleCount();
1297    int srcSampleCnt = srcAttachment->sampleCount();
1298
1299    const SkIPoint dstPoint = dstRect.topLeft();
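    // If the copy can be expressed as an MSAA resolve, prefer that path: the
    // hardware resolves directly into the destination.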
    if (this->mtlCaps().canCopyAsResolve(dstFormat, dstSampleCnt,
                                         srcFormat, srcSampleCnt,
                                         SkToBool(srcRT), src->dimensions(),
                                         srcRect, dstPoint,
                                         dstAttachment == srcAttachment)) {
        this->copySurfaceAsResolve(dst, src);
        return true;
    }

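    // framebufferOnly textures may only be used as render pass attachments, so
    // a blit copy is only possible when both attachments allow other access.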
    if (srcAttachment->framebufferOnly() || dstAttachment->framebufferOnly()) {
        return false;
    }

    if (this->mtlCaps().canCopyAsBlit(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt,
                                      srcRect, dstPoint, dstAttachment == srcAttachment)) {
        this->copySurfaceAsBlit(dst, src, dstAttachment, srcAttachment, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrMtlGpu::onWritePixels(GrSurface* surface,
                             SkIRect rect,
                             GrColorType surfaceColorType,
                             GrColorType srcColorType,
                             const GrMipLevel texels[],
                             int mipLevelCount,
                             bool prepForTexSampling) {
    GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(surface->asTexture());
    // TODO: In principle we should be able to support pure rendertargets as well, but
    // until we find a use case we'll only support texture rendertargets.
    if (!mtlTexture) {
        return false;
    }
    if (!mipLevelCount) {
        return false;
    }
#ifdef SK_DEBUG
    for (int i = 0; i < mipLevelCount; i++) {
        SkASSERT(texels[i].fPixels);
    }
#endif
    return this->uploadToTexture(mtlTexture, rect, srcColorType, texels, mipLevelCount);
}

bool GrMtlGpu::onReadPixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType dstColorType,
                            void* buffer,
                            size_t rowBytes) {
    SkASSERT(surface);

    if (surfaceColorType != dstColorType) {
        return false;
    }

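    // Read back through a staging buffer: blit the pixels into a GPU->CPU
    // transfer buffer, synchronously flush the command buffer, then copy the
    // mapped contents into the caller's memory.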
    int bpp = GrColorTypeBytesPerPixel(dstColorType);
    size_t transBufferRowBytes = bpp*rect.width();
    size_t transBufferImageBytes = transBufferRowBytes*rect.height();

    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferImageBytes,
            GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern,
            GrResourceProvider::ZeroInit::kNo);

    if (!transferBuffer) {
        return false;
    }

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    if (!this->readOrTransferPixels(surface,
                                    rect,
                                    dstColorType,
                                    grMtlBuffer->mtlBuffer(),
                                    0,
                                    transBufferImageBytes,
                                    transBufferRowBytes)) {
        return false;
    }
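    // Force a submit and wait for completion so the transfer buffer's contents
    // are valid before we map and read them.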
    this->submitCommandBuffer(kForce_SyncQueue);

    const void* mappedMemory = grMtlBuffer->mtlBuffer().contents;

    SkRectMemcpy(buffer,
                 rowBytes,
                 mappedMemory,
                 transBufferRowBytes,
                 transBufferRowBytes,
                 rect.height());

    return true;
}

bool GrMtlGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                            size_t srcOffset,
                                            sk_sp<GrGpuBuffer> dst,
                                            size_t dstOffset,
                                            size_t size) {
    id<MTLBuffer> GR_NORETAIN mtlSrc = static_cast<GrMtlBuffer*>(src.get())->mtlBuffer();
    id<MTLBuffer> GR_NORETAIN mtlDst = static_cast<GrMtlBuffer*>(dst.get())->mtlBuffer();
    SkASSERT(mtlSrc);
    SkASSERT(mtlDst);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onTransferFromBufferToBuffer"];
#endif
    [blitCmdEncoder copyFromBuffer: mtlSrc
                      sourceOffset: srcOffset
                          toBuffer: mtlDst
                 destinationOffset: dstOffset
                              size: size];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    cmdBuffer->addGrBuffer(std::move(src));
    cmdBuffer->addGrBuffer(std::move(dst));

    return true;
}

bool GrMtlGpu::onTransferPixelsTo(GrTexture* texture,
                                  SkIRect rect,
                                  GrColorType textureColorType,
                                  GrColorType bufferColorType,
                                  sk_sp<GrGpuBuffer> transferBuffer,
                                  size_t offset,
                                  size_t rowBytes) {
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    if (textureColorType != bufferColorType) {
        return false;
    }

    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();
    SkASSERT(mtlTexture);

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    id<MTLBuffer> GR_NORETAIN mtlBuffer = grMtlBuffer->mtlBuffer();
    SkASSERT(mtlBuffer);

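    // Metal only supports transfer offsets that are aligned to a pixel, and the
    // buffer's pixel layout must match the texture's bytes-per-pixel exactly.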
    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onTransferPixelsTo"];
#endif
    [blitCmdEncoder copyFromBuffer: mtlBuffer
                      sourceOffset: offset
                 sourceBytesPerRow: rowBytes
               sourceBytesPerImage: rowBytes*rect.height()
                        sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
                         toTexture: mtlTexture
                  destinationSlice: 0
                  destinationLevel: 0
                 destinationOrigin: origin];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return true;
}

bool GrMtlGpu::onTransferPixelsFrom(GrSurface* surface,
                                    SkIRect rect,
                                    GrColorType surfaceColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset) {
    SkASSERT(surface);
    SkASSERT(transferBuffer);

    if (surfaceColorType != bufferColorType) {
        return false;
    }

    // Metal only supports offsets that are aligned to a pixel.
    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (GrBackendFormatBytesPerPixel(surface->backendFormat()) != bpp) {
        return false;
    }

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());

    size_t transBufferRowBytes = bpp*rect.width();
    size_t transBufferImageBytes = transBufferRowBytes*rect.height();

    return this->readOrTransferPixels(surface,
                                      rect,
                                      bufferColorType,
                                      grMtlBuffer->mtlBuffer(),
                                      offset,
                                      transBufferImageBytes,
                                      transBufferRowBytes);
}

bool GrMtlGpu::readOrTransferPixels(GrSurface* surface,
                                    SkIRect rect,
                                    GrColorType dstColorType,
                                    id<MTLBuffer> transferBuffer,
                                    size_t offset,
                                    size_t imageBytes,
                                    size_t rowBytes) {
    if (!check_max_blit_width(rect.width())) {
        return false;
    }

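    // Pick the texture to read from: for manually resolved MSAA render targets
    // that is the resolve texture; otherwise it is the single-sample color or
    // texture attachment.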
    id<MTLTexture> mtlTexture;
    if (GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget())) {
        if (rt->numSamples() > 1) {
            SkASSERT(rt->requiresManualMSAAResolve());  // msaa-render-to-texture not yet supported.
            mtlTexture = rt->resolveMTLTexture();
        } else {
            SkASSERT(!rt->requiresManualMSAAResolve());
            mtlTexture = rt->colorMTLTexture();
        }
    } else if (GrMtlTexture* texture = static_cast<GrMtlTexture*>(surface->asTexture())) {
        mtlTexture = texture->mtlTexture();
    }
    if (!mtlTexture) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"readOrTransferPixels"];
#endif
    [blitCmdEncoder copyFromTexture: mtlTexture
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(rect.left(), rect.top(), 0)
                         sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
                           toBuffer: transferBuffer
                  destinationOffset: offset
             destinationBytesPerRow: rowBytes
           destinationBytesPerImage: imageBytes];
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        // Sync GPU data back to the CPU
        [blitCmdEncoder synchronizeResource: transferBuffer];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return true;
}

GrFence SK_WARN_UNUSED_RESULT GrMtlGpu::insertFence() {
    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    // We create a semaphore and signal it within the current
    // command buffer's completion handler.
    dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
    cmdBuffer->addCompletedHandler(^(id<MTLCommandBuffer> commandBuffer) {
        dispatch_semaphore_signal(semaphore);
    });

    const void* cfFence = (__bridge_retained const void*) semaphore;
    return (GrFence) cfFence;
}

bool GrMtlGpu::waitFence(GrFence fence) {
    const void* cfFence = (const void*) fence;
    dispatch_semaphore_t semaphore = (__bridge dispatch_semaphore_t)cfFence;

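    // A timeout of 0 (DISPATCH_TIME_NOW) makes this a non-blocking poll;
    // dispatch_semaphore_wait() returns zero only if the semaphore was signaled.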
    long result = dispatch_semaphore_wait(semaphore, 0);

    return !result;
}

void GrMtlGpu::deleteFence(GrFence fence) {
    const void* cfFence = (const void*) fence;
    // This balances the __bridge_retained transfer in insertFence(); releasing
    // through CoreFoundation is simpler than bridging back under ARC.
    CFRelease(cfFence);
}

std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrMtlGpu::makeSemaphore(bool /*isOwned*/) {
    SkASSERT(this->caps()->semaphoreSupport());
    return GrMtlSemaphore::Make(this);
}

std::unique_ptr<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                            GrSemaphoreWrapType /* wrapType */,
                                                            GrWrapOwnership /*ownership*/) {
    SkASSERT(this->caps()->semaphoreSupport());
    return GrMtlSemaphore::MakeWrapped(semaphore.mtlSemaphore(), semaphore.mtlValue());
}

void GrMtlGpu::insertSemaphore(GrSemaphore* semaphore) {
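    // GrMtlSemaphore is backed by an MTLEvent, which requires macOS 10.14 / iOS 12.0;
    // on older OSes the signal is skipped.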
    if (@available(macOS 10.14, iOS 12.0, *)) {
        SkASSERT(semaphore);
        GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);

        this->commandBuffer()->encodeSignalEvent(mtlSem->event(), mtlSem->value());
    }
}

void GrMtlGpu::waitSemaphore(GrSemaphore* semaphore) {
    if (@available(macOS 10.14, iOS 12.0, *)) {
        SkASSERT(semaphore);
        GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);

        this->commandBuffer()->encodeWaitForEvent(mtlSem->event(), mtlSem->value());
    }
}

void GrMtlGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect&) {
    SkASSERT(target->numSamples() > 1);
    GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(target);

    if (rt->resolveAttachment() && this->mtlCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolve(rt->resolveAttachment(), rt->colorAttachment());
}

void GrMtlGpu::resolve(GrMtlAttachment* resolveAttachment,
                       GrMtlAttachment* msaaAttachment) {
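    // Metal's blit encoder has no standalone resolve command, so we run an
    // empty render pass that loads the MSAA attachment and stores with
    // MTLStoreActionMultisampleResolve, resolving into resolveTexture.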
    auto renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
    auto colorAttachment = renderPassDesc.colorAttachments[0];
    colorAttachment.texture = msaaAttachment->mtlTexture();
    colorAttachment.resolveTexture = resolveAttachment->mtlTexture();
    colorAttachment.loadAction = MTLLoadActionLoad;
    colorAttachment.storeAction = MTLStoreActionMultisampleResolve;

    GrMtlRenderCommandEncoder* cmdEncoder =
            this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr, nullptr);
    if (cmdEncoder) {
        cmdEncoder->setLabel(@"resolveTexture");
        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(resolveAttachment));
        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(msaaAttachment));
    }
}

GrMtlRenderCommandEncoder* GrMtlGpu::loadMSAAFromResolve(
        GrAttachment* dst, GrMtlAttachment* src, const SkIRect& srcRect,
        MTLRenderPassStencilAttachmentDescriptor* stencil) {
    if (!dst) {
        return nil;
    }
    if (!src || src->framebufferOnly()) {
        return nil;
    }

    GrMtlAttachment* mtlDst = static_cast<GrMtlAttachment*>(dst);

    MTLPixelFormat stencilFormat = stencil.texture.pixelFormat;
    auto renderPipeline = this->resourceProvider().findOrCreateMSAALoadPipeline(mtlDst->mtlFormat(),
                                                                                dst->numSamples(),
                                                                                stencilFormat);

    // Set up the render command encoder.
    auto renderPassDesc = [MTLRenderPassDescriptor new];
    auto colorAttachment = renderPassDesc.colorAttachments[0];
    colorAttachment.texture = mtlDst->mtlTexture();
    colorAttachment.loadAction = MTLLoadActionDontCare;
    colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
    colorAttachment.resolveTexture = src->mtlTexture();

    renderPassDesc.stencilAttachment = stencil;

    // We know in this case that the preceding renderCommandEncoder will not be compatible.
    // Either it's using a different rendertarget, or we are reading from the resolve and
    // hence we need to let the previous resolve finish. So we create a new one without checking.
    auto renderCmdEncoder =
            this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr);
    if (!renderCmdEncoder) {
        return nullptr;
    }

    // Bind the pipeline.
    renderCmdEncoder->setRenderPipelineState(renderPipeline->mtlPipelineState());
    this->commandBuffer()->addResource(sk_ref_sp(renderPipeline));

    // Bind src as the input texture; no sampler is needed.
    renderCmdEncoder->setFragmentTexture(src->mtlTexture(), 0);
    this->commandBuffer()->addGrSurface(sk_ref_sp<GrSurface>(src));

    // Scissor and viewport default to the size of the color attachment.

    // Update and bind the uniform data.
    int w = srcRect.width();
    int h = srcRect.height();

    // dst rect edges in NDC (-1 to 1)
    int dw = dst->width();
    int dh = dst->height();
    float dx0 = 2.f * srcRect.fLeft / dw - 1.f;
    float dx1 = 2.f * (srcRect.fLeft + w) / dw - 1.f;
    float dy0 = 2.f * srcRect.fTop / dh - 1.f;
    float dy1 = 2.f * (srcRect.fTop + h) / dh - 1.f;

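    // Note: this layout is assumed to match the uniforms declared by the
    // MSAA-load pipeline created above: a float4 position transform and an
    // int2 texture size, padded out to a 16-byte multiple.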
    struct {
        float posXform[4];
        int textureSize[2];
        int pad[2];
    } uniData = {{dx1 - dx0, dy1 - dy0, dx0, dy0}, {dw, dh}, {0, 0}};

    constexpr size_t uniformSize = 32;
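    // setVertexBytes is the cheap path for small uniform uploads; where it is
    // unavailable we suballocate from the uniforms ring buffer instead.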
    if (@available(macOS 10.11, iOS 8.3, *)) {
        SkASSERT(uniformSize <= this->caps()->maxPushConstantsSize());
        renderCmdEncoder->setVertexBytes(&uniData, uniformSize, 0);
    } else {
        // Upload the data to a ring-buffer slice.
        GrRingBuffer::Slice slice = this->uniformsRingBuffer()->suballocate(uniformSize);
        GrMtlBuffer* buffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
        char* destPtr = static_cast<char*>(slice.fBuffer->map()) + slice.fOffset;
        memcpy(destPtr, &uniData, uniformSize);

        renderCmdEncoder->setVertexBuffer(buffer->mtlBuffer(), slice.fOffset, 0);
    }

    renderCmdEncoder->drawPrimitives(MTLPrimitiveTypeTriangleStrip, (NSUInteger)0, (NSUInteger)4);

    return renderCmdEncoder;
}

#if GR_TEST_UTILS
void GrMtlGpu::testingOnly_startCapture() {
    if (@available(macOS 10.13, iOS 11.0, *)) {
        // TODO: add Metal 3 interface as well
        MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
        if (captureManager.isCapturing) {
            return;
        }
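        // MTLCaptureDescriptor-based capture (macOS 10.15 / iOS 13.0 and later)
        // lets us target the command queue directly; older OSes fall back to
        // the legacy queue-capture call.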
        if (@available(macOS 10.15, iOS 13.0, *)) {
            MTLCaptureDescriptor* captureDescriptor = [[MTLCaptureDescriptor alloc] init];
            captureDescriptor.captureObject = fQueue;

            NSError* error;
            if (![captureManager startCaptureWithDescriptor: captureDescriptor error: &error]) {
                NSLog(@"Failed to start capture, error %@", error);
            }
        } else {
            [captureManager startCaptureWithCommandQueue: fQueue];
        }
    }
}

void GrMtlGpu::testingOnly_stopCapture() {
    if (@available(macOS 10.13, iOS 11.0, *)) {
        MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
        if (captureManager.isCapturing) {
            [captureManager stopCapture];
        }
    }
}
#endif

#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"
void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
    // We are called by the base class, which has already called beginObject(). We choose to nest
    // all of our caps information in a named sub-object.
    writer->beginObject("Metal GPU");

    writer->beginObject("Device");
    writer->appendCString("name", fDevice.name.UTF8String);
#ifdef SK_BUILD_FOR_MAC
    if (@available(macOS 10.11, *)) {
        writer->appendBool("isHeadless", fDevice.isHeadless);
        writer->appendBool("isLowPower", fDevice.isLowPower);
    }
    if (@available(macOS 10.13, *)) {
        writer->appendBool("isRemovable", fDevice.isRemovable);
    }
#endif
    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendU64("registryID", fDevice.registryID);
    }
#if defined(SK_BUILD_FOR_MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    if (@available(macOS 10.15, *)) {
        switch (fDevice.location) {
            case MTLDeviceLocationBuiltIn:
                writer->appendNString("location", "builtIn");
                break;
            case MTLDeviceLocationSlot:
                writer->appendNString("location", "slot");
                break;
            case MTLDeviceLocationExternal:
                writer->appendNString("location", "external");
                break;
            case MTLDeviceLocationUnspecified:
                writer->appendNString("location", "unspecified");
                break;
            default:
                writer->appendNString("location", "unknown");
                break;
        }
        writer->appendU64("locationNumber", fDevice.locationNumber);
        writer->appendU64("maxTransferRate", fDevice.maxTransferRate);
    }
#endif  // SK_BUILD_FOR_MAC
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500 || __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000
    if (@available(macOS 10.15, iOS 13.0, *)) {
        writer->appendBool("hasUnifiedMemory", fDevice.hasUnifiedMemory);
    }
#endif
#ifdef SK_BUILD_FOR_MAC
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    if (@available(macOS 10.15, *)) {
        writer->appendU64("peerGroupID", fDevice.peerGroupID);
        writer->appendU32("peerCount", fDevice.peerCount);
        writer->appendU32("peerIndex", fDevice.peerIndex);
    }
#endif
    if (@available(macOS 10.12, *)) {
        writer->appendU64("recommendedMaxWorkingSetSize", fDevice.recommendedMaxWorkingSetSize);
    }
#endif  // SK_BUILD_FOR_MAC
    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendU64("currentAllocatedSize", fDevice.currentAllocatedSize);
        writer->appendU64("maxThreadgroupMemoryLength", fDevice.maxThreadgroupMemoryLength);
    }

    if (@available(macOS 10.11, iOS 9.0, *)) {
        writer->beginObject("maxThreadsPerThreadgroup");
        writer->appendU64("width", fDevice.maxThreadsPerThreadgroup.width);
        writer->appendU64("height", fDevice.maxThreadsPerThreadgroup.height);
        writer->appendU64("depth", fDevice.maxThreadsPerThreadgroup.depth);
        writer->endObject();
    }

    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendBool("areProgrammableSamplePositionsSupported",
                           fDevice.areProgrammableSamplePositionsSupported);
        writer->appendBool("areRasterOrderGroupsSupported",
                           fDevice.areRasterOrderGroupsSupported);
    }
#ifdef SK_BUILD_FOR_MAC
    if (@available(macOS 10.11, *)) {
        writer->appendBool("isDepth24Stencil8PixelFormatSupported",
                           fDevice.isDepth24Stencil8PixelFormatSupported);
    }
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    if (@available(macOS 10.15, *)) {
        writer->appendBool("areBarycentricCoordsSupported",
                           fDevice.areBarycentricCoordsSupported);
        writer->appendBool("supportsShaderBarycentricCoordinates",
                           fDevice.supportsShaderBarycentricCoordinates);
    }
#endif
#endif  // SK_BUILD_FOR_MAC
    if (@available(macOS 10.14, iOS 12.0, *)) {
        writer->appendU64("maxBufferLength", fDevice.maxBufferLength);
    }
    if (@available(macOS 10.13, iOS 11.0, *)) {
        switch (fDevice.readWriteTextureSupport) {
            case MTLReadWriteTextureTier1:
                writer->appendNString("readWriteTextureSupport", "tier1");
                break;
            case MTLReadWriteTextureTier2:
                writer->appendNString("readWriteTextureSupport", "tier2");
                break;
            case MTLReadWriteTextureTierNone:
                writer->appendNString("readWriteTextureSupport", "tierNone");
                break;
            default:
                writer->appendNString("readWriteTextureSupport", "unknown");
                break;
        }
        switch (fDevice.argumentBuffersSupport) {
            case MTLArgumentBuffersTier1:
                writer->appendNString("argumentBuffersSupport", "tier1");
                break;
            case MTLArgumentBuffersTier2:
                writer->appendNString("argumentBuffersSupport", "tier2");
                break;
            default:
                writer->appendNString("argumentBuffersSupport", "unknown");
                break;
        }
    }
    if (@available(macOS 10.14, iOS 12.0, *)) {
        writer->appendU64("maxArgumentBufferSamplerCount", fDevice.maxArgumentBufferSamplerCount);
    }
#ifdef SK_BUILD_FOR_IOS
    if (@available(iOS 13.0, *)) {
        writer->appendU64("sparseTileSizeInBytes", fDevice.sparseTileSizeInBytes);
    }
#endif
    writer->endObject();

    writer->appendCString("queue", fQueue.label.UTF8String);
    writer->appendBool("disconnected", fDisconnected);

    writer->endObject();
}
#endif

GR_NORETAIN_END