• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1/*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/gpu/mtl/GrMtlGpu.h"
9
10#include "include/private/GrTypesPriv.h"
11#include "src/core/SkCompressedDataUtils.h"
12#include "src/core/SkConvertPixels.h"
13#include "src/core/SkMathPriv.h"
14#include "src/core/SkMipmap.h"
15#include "src/gpu/GrBackendUtils.h"
16#include "src/gpu/GrDataUtils.h"
17#include "src/gpu/GrDirectContextPriv.h"
18#include "src/gpu/GrRenderTarget.h"
19#include "src/gpu/GrResourceProvider.h"
20#include "src/gpu/GrTexture.h"
21#include "src/gpu/GrThreadSafePipelineBuilder.h"
22#include "src/gpu/mtl/GrMtlBuffer.h"
23#include "src/gpu/mtl/GrMtlCommandBuffer.h"
24#include "src/gpu/mtl/GrMtlOpsRenderPass.h"
25#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
26#include "src/gpu/mtl/GrMtlRenderCommandEncoder.h"
27#include "src/gpu/mtl/GrMtlSemaphore.h"
28#include "src/gpu/mtl/GrMtlTexture.h"
29#include "src/gpu/mtl/GrMtlTextureRenderTarget.h"
30#include "src/gpu/mtl/GrMtlUtil.h"
31
32#import <simd/simd.h>
33
34#if !__has_feature(objc_arc)
35#error This file must be compiled with Arc. Use -fobjc-arc flag
36#endif
37
38GR_NORETAIN_BEGIN
39
40#if GR_TEST_UTILS
41// set to 1 if you want to do GPU capture of each commandBuffer
42#define GR_METAL_CAPTURE_COMMANDBUFFER 0
43#endif
44
// Factory for the Metal GPU backend. Validates the backend context and the minimum OS
// version before constructing a GrMtlGpu. Returns nullptr if the context lacks a device or
// command queue, or if the OS is older than macOS 10.14 / iOS 10.0.
sk_sp<GrGpu> GrMtlGpu::Make(const GrMtlBackendContext& context, const GrContextOptions& options,
                            GrDirectContext* direct) {
    if (!context.fDevice || !context.fQueue) {
        return nullptr;
    }
    if (@available(macOS 10.14, iOS 10.0, *)) {
        // no warning needed
    } else {
        SkDebugf("*** Error ***: Skia's Metal backend no longer supports this OS version.\n");
#ifdef SK_BUILD_FOR_IOS
        SkDebugf("Minimum supported version is iOS 10.0.\n");
#else
        SkDebugf("Minimum supported version is MacOS 10.14.\n");
#endif
        return nullptr;
    }

    // Borrow (don't retain) the device and queue from the backend context; the context keeps
    // its own owning references.
    id<MTLDevice> GR_NORETAIN device = (__bridge id<MTLDevice>)(context.fDevice.get());
    id<MTLCommandQueue> GR_NORETAIN queue = (__bridge id<MTLCommandQueue>)(context.fQueue.get());

    return sk_sp<GrGpu>(new GrMtlGpu(direct, options, device, queue, context.fBinaryArchive.get()));
}
67
// Number of OutstandingCommandBuffers allocated together as one block in the deque. This
// balances allocating too much memory up front against allocation/deallocation thrashing,
// and should roughly match the maximum number of command buffers we expect in flight.
static constexpr int kDefaultOutstandingAllocCnt = 8;
73
// Constructor. |device| and |queue| are borrowed from the backend context (see Make());
// |binaryArchive| is an optional MTLBinaryArchive handle used for pipeline caching.
GrMtlGpu::GrMtlGpu(GrDirectContext* direct, const GrContextOptions& options,
                   id<MTLDevice> device, id<MTLCommandQueue> queue, GrMTLHandle binaryArchive)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(queue)
        , fOutstandingCommandBuffers(sizeof(OutstandingCommandBuffer), kDefaultOutstandingAllocCnt)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fUniformsRingBuffer(this, 128 * 1024, 256, GrGpuBufferType::kUniform)
        , fDisconnected(false) {
    fMtlCaps.reset(new GrMtlCaps(options, fDevice));
    this->initCapsAndCompiler(fMtlCaps);
#if GR_METAL_CAPTURE_COMMANDBUFFER
    // Testing-only: start a GPU capture so each command buffer can be inspected.
    this->testingOnly_startCapture();
#endif
    fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
#if GR_METAL_SDK_VERSION >= 230
    // Binary archives require both a new-enough SDK at compile time and a new-enough OS at
    // runtime; otherwise fBinaryArchive is left unset.
    if (@available(macOS 11.0, iOS 14.0, *)) {
        fBinaryArchive = (__bridge id<MTLBinaryArchive>)(binaryArchive);
    }
#endif
}
96
// Destructor: releases GPU resources unless disconnect() already did so.
GrMtlGpu::~GrMtlGpu() {
    if (fDisconnected) {
        return;
    }
    this->destroyResources();
}
102
// Detaches this GPU from its context, tearing down resources exactly once.
void GrMtlGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);

    if (fDisconnected) {
        return;
    }
    this->destroyResources();
    fDisconnected = true;
}
111
// The Metal backend does not expose a shared thread-safe pipeline builder.
GrThreadSafePipelineBuilder* GrMtlGpu::pipelineBuilder() {
    return nullptr;
}
115
// Ref-counted variant of pipelineBuilder(); likewise unavailable on this backend.
sk_sp<GrThreadSafePipelineBuilder> GrMtlGpu::refPipelineBuilder() {
    return nullptr;
}
119
// Tears down all GPU-side state. Called from both the destructor and disconnect().
// The order matters: submit & wait first, then drain outstanding command buffers, then
// release managers/providers, and only then drop the queue and device references.
void GrMtlGpu::destroyResources() {
    // Force-submit and wait so all pending GPU work completes before anything is released.
    this->submitCommandBuffer(SyncQueue::kForce_SyncQueue);
    // if there's no work we won't release the command buffer, so we do it here
    fCurrentCmdBuffer = nil;

    // We used a placement new for each object in fOutstandingCommandBuffers, so we're responsible
    // for calling the destructor on each of them as well.
    while (!fOutstandingCommandBuffers.empty()) {
        OutstandingCommandBuffer* buffer =
                (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
        // make sure we remove before deleting as deletion might try to kick off another submit
        fOutstandingCommandBuffers.pop_front();
        buffer->~OutstandingCommandBuffer();
    }

    fStagingBufferManager.reset();

    fResourceProvider.destroyResources();

    // Drop the Metal queue and device last.
    fQueue = nil;
    fDevice = nil;
}
142
// Creates an ops render pass for |renderTarget|, selecting a framebuffer compatible with the
// requested MSAA/stencil configuration. Returns nullptr if no framebuffer is available.
GrOpsRenderPass* GrMtlGpu::onGetOpsRenderPass(
            GrRenderTarget* renderTarget, bool useMSAASurface, GrAttachment* stencil,
            GrSurfaceOrigin origin, const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) {
    auto* mtlRT = static_cast<GrMtlRenderTarget*>(renderTarget);

    // TODO: support DMSAA
    SkASSERT(!useMSAASurface || renderTarget->numSamples() > 1);

    // Decide whether a Resolve store action can be used. When it can, the render pass setup
    // adjusts the color load/store ops so the msaa color attachment itself is never loaded or
    // stored; only the resolve attachment may be.
    const bool withResolve =
            useMSAASurface && this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT);

    sk_sp<GrMtlFramebuffer> framebuffer =
            sk_ref_sp(mtlRT->getFramebuffer(withResolve, SkToBool(stencil)));
    if (!framebuffer) {
        return nullptr;
    }

    return new GrMtlOpsRenderPass(this, renderTarget, std::move(framebuffer), origin, colorInfo,
                                  stencilInfo);
}
176
// Returns the current command buffer, lazily creating one if the previous buffer was
// dropped at submit time.
GrMtlCommandBuffer* GrMtlGpu::commandBuffer() {
    if (fCurrentCmdBuffer == nullptr) {
#if GR_METAL_CAPTURE_COMMANDBUFFER
        this->testingOnly_startCapture();
#endif
        // Start a fresh command buffer for the work leading up to the next submit.
        fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
    }

    SkASSERT(fCurrentCmdBuffer);
    return fCurrentCmdBuffer.get();
}
189
// Parks |buffer| on the current command buffer so it stays alive until the GPU work that
// references it has been submitted.
void GrMtlGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    SkASSERT(buffer);
    this->commandBuffer()->addGrBuffer(std::move(buffer));
}
194
// Submits and destroys a render pass previously returned by onGetOpsRenderPass().
void GrMtlGpu::submit(GrOpsRenderPass* renderPass) {
    // This backend only ever hands out GrMtlOpsRenderPass objects, so a static downcast is
    // correct here. (The original used reinterpret_cast, which performs no pointer
    // adjustment and would silently misbehave if GrOpsRenderPass were not the primary base.)
    GrMtlOpsRenderPass* mtlRenderPass = static_cast<GrMtlOpsRenderPass*>(renderPass);
    mtlRenderPass->submit();
    delete renderPass;
}
200
// Commits the current command buffer to the queue. With kForce_SyncQueue this also waits
// for completion. Returns true on success or when there was nothing to submit.
bool GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
    // Nothing recorded: optionally drain outstanding GPU work, fire any pending callbacks,
    // and report success without committing anything.
    if (!fCurrentCmdBuffer || !fCurrentCmdBuffer->hasWork()) {
        if (sync == SyncQueue::kForce_SyncQueue) {
            this->finishOutstandingGpuWork();
            this->checkForFinishedCommandBuffers();
        }
        // We need to manually call the finishedCallbacks since we don't add this
        // to the OutstandingCommandBuffer list
        if (fCurrentCmdBuffer) {
            fCurrentCmdBuffer->callFinishedCallbacks();
        }
        return true;
    }

    SkASSERT(fCurrentCmdBuffer);
    bool didCommit = fCurrentCmdBuffer->commit(sync == SyncQueue::kForce_SyncQueue);
    if (didCommit) {
        // Track the committed buffer (placement new into the deque) so we can later detect
        // completion and release its resources.
        new (fOutstandingCommandBuffers.push_back()) OutstandingCommandBuffer(fCurrentCmdBuffer);
    }

    // We don't create a new command buffer here because we may end up using it
    // in the next frame, and that confuses the GPU debugger. Instead we
    // create when we next need one.
    fCurrentCmdBuffer.reset();

    // If the freeing of any resources held by a finished command buffer causes us to send
    // a new command to the gpu we'll create the new command buffer in commandBuffer(), above.
    this->checkForFinishedCommandBuffers();

#if GR_METAL_CAPTURE_COMMANDBUFFER
    this->testingOnly_endCapture();
#endif
    return didCommit;
}
235
// Retires completed command buffers. The deque is ordered oldest-first, so we can stop at
// the first buffer that has not completed — everything after it is still pending too.
void GrMtlGpu::checkForFinishedCommandBuffers() {
    for (;;) {
        auto* oldest = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
        if (!oldest || !(*oldest)->isCompleted()) {
            break;
        }
        // Remove the entry before destroying it: destruction may kick off another submit.
        fOutstandingCommandBuffers.pop_front();
        // Entries were placement-new'd, so the destructor must be invoked explicitly.
        oldest->~OutstandingCommandBuffer();
    }
}
251
// Blocks until all submitted GPU work is done. Waiting on the newest outstanding command
// buffer suffices, since it cannot complete before any buffer submitted earlier.
void GrMtlGpu::finishOutstandingGpuWork() {
    auto* newest = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (newest) {
        (*newest)->waitUntilCompleted();
    }
}
260
// Wraps a raw proc/context pair in a ref-counted callback and registers it.
void GrMtlGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                               GrGpuFinishedContext finishedContext) {
    SkASSERT(finishedProc);
    auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
    this->addFinishedCallback(std::move(callback));
}
266
// Registers a callback to fire once all previously submitted command buffers have finished.
void GrMtlGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    // The callback goes on the newest outstanding command buffer in addition to the current
    // one. Our contract is that the proc runs only after all previously submitted work is
    // done; if the current command buffer is flushed with no work it drops its callback ref
    // immediately, even though older submissions may still be running. Registering with only
    // the newest outstanding buffer is sufficient, because that buffer must finish after all
    // previously submitted command buffers.
    auto* newest = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (newest) {
        (*newest)->addFinishedCallback(finishedCallback);
    }
    this->commandBuffer()->addFinishedCallback(std::move(finishedCallback));
}
281
// Submits pending work; when |syncCpu| is true, also blocks until the GPU finishes it.
bool GrMtlGpu::onSubmitToGpu(bool syncCpu) {
    const SyncQueue sync = syncCpu ? kForce_SyncQueue : kSkip_SyncQueue;
    return this->submitCommandBuffer(sync);
}
289
// Flushes pending work so the texture is ready for use by another context.
// No semaphore is handed back on this backend.
std::unique_ptr<GrSemaphore> GrMtlGpu::prepareTextureForCrossContextUsage(GrTexture*) {
    this->submitToGpu(false);
    return nullptr;
}
294
// Creates a GPU buffer, delegating directly to the Metal buffer factory.
sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                                            GrAccessPattern accessPattern, const void* data) {
    return GrMtlBuffer::Make(this, size, type, accessPattern, data);
}
299
// Returns true when |widthInPixels| is within the maximum width we will blit (32767 px).
static bool check_max_blit_width(int widthInPixels) {
    constexpr int kMaxBlitWidth = 32767;
    if (widthInPixels <= kMaxBlitWidth) {
        return true;
    }
    SkASSERT(false); // surfaces should not be this wide anyway
    return false;
}
307
// Uploads CPU pixel data for one or all mip levels of |tex|. The data for every level is
// packed tightly into a single staging-buffer slice and then blitted level-by-level into the
// texture. |rect| is the destination region (must be the full texture if more than one level
// is uploaded). Returns false on validation failure or if a staging slice / blit encoder
// cannot be obtained.
bool GrMtlGpu::uploadToTexture(GrMtlTexture* tex,
                               SkIRect rect,
                               GrColorType dataColorType,
                               const GrMipLevel texels[],
                               int mipLevelCount) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(tex->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (tex->maxMipmapLevel() + 1));

    if (!check_max_blit_width(rect.width())) {
        return false;
    }
    if (rect.isEmpty()) {
        return false;
    }

    SkASSERT(this->mtlCaps().surfaceSupportsWritePixels(tex));
    SkASSERT(this->mtlCaps().areColorTypeAndFormatCompatible(dataColorType, tex->backendFormat()));

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    SkASSERT(1 == mipLevelCount || mipLevelCount == (int)mtlTexture.mipmapLevelCount);

    if (mipLevelCount == 1 && !texels[0].fPixels) {
        return true;   // no data to upload
    }

    for (int i = 0; i < mipLevelCount; ++i) {
        // We do not allow any gaps in the mip data
        if (!texels[i].fPixels) {
            return false;
        }
    }

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // Compute each level's tightly-packed offset within one combined staging allocation.
    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                                 rect.size(),
                                                                 &individualMipOffsets,
                                                                 mipLevelCount);
    SkASSERT(combinedBufferSize);


    // offset value must be a multiple of the destination texture's pixel size in bytes
    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    SkDEBUGCODE(int layerHeight = tex->height());
    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"uploadToTexture"];
#endif
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texels[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texels[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping any trailing bytes
            char* dst = bufferData + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texels[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            // Queue a GPU copy from the staging slice into this mip level of the texture.
            [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                              sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: trimRowBytes
                       sourceBytesPerImage: trimRowBytes*currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
        SkDEBUGCODE(layerHeight = currentHeight);
    }
#ifdef SK_BUILD_FOR_MAC
    // On macOS the staging buffer may use managed storage; flush the CPU writes to the GPU.
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    // If only the base level of a mipmapped texture was written, the other levels are stale.
    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}
420
// Clears the mip levels of |tex| selected by |levelMask| to transparent black by zero-filling
// a staging buffer on the GPU and blitting it into each selected level. |bpp| is the texture's
// bytes per pixel and must be a power of two.
bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));

    if (!levelMask) {
        return true;
    }

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    int mipLevelCount = (int)mtlTexture.mipmapLevelCount;

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = 0;
    int currentWidth = tex->width();
    int currentHeight = tex->height();

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works with the assumption that the bytes in pixel config is always a power of 2.
    // TODO: can we just copy from a single buffer the size of the largest cleared level w/o a perf
    // penalty?
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
    // First pass: compute each selected level's aligned offset and the total buffer size.
    // NOTE(review): individualMipOffsets is appended only for levels present in levelMask but
    // is indexed by level number in the copy loop below; these agree only when levelMask has
    // no gaps below its highest set bit — verify against callers.
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    SkASSERT(combinedBufferSize > 0 && !individualMipOffsets.empty());

    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
    id<MTLBuffer> transferBuffer = mtlBuffer->mtlBuffer();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"clearTexture"];
#endif
    // clear the buffer to transparent black
    NSRange clearRange;
    clearRange.location = 0;
    clearRange.length = combinedBufferSize;
    [blitCmdEncoder fillBuffer: transferBuffer
                         range: clearRange
                         value: 0];

    // now copy buffer to texture
    currentWidth = tex->width();
    currentHeight = tex->height();
    MTLOrigin origin = MTLOriginMake(0, 0, 0);
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t rowBytes = currentWidth * bpp;

            [blitCmdEncoder copyFromBuffer: transferBuffer
                              sourceOffset: individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: rowBytes
                       sourceBytesPerImage: rowBytes * currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    // Don't need didModifyRange: here because fillBuffer: happens on the GPU
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    // If not every level of a mipmapped texture was cleared, the remaining levels are stale.
    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}
516
// Creates a stencil attachment. The stencil format is chosen by caps, independent of the
// (ignored) color format parameter.
sk_sp<GrAttachment> GrMtlGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                    SkISize dimensions, int numStencilSamples) {
    MTLPixelFormat sFmt = this->mtlCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    // Fixed: dropped the redundant injected-class-name qualification
    // (GrMtlAttachment::GrMtlAttachment::MakeStencil).
    return GrMtlAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}
524
// Creates a multisampled color attachment with the given format and sample count.
sk_sp<GrAttachment> GrMtlGpu::makeMSAAAttachment(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 int numSamples,
                                                 GrProtected isProtected,
                                                 GrMemoryless isMemoryless) {
    // Metal doesn't support protected textures
    SkASSERT(isProtected == GrProtected::kNo);
    // TODO: add memoryless support
    SkASSERT(isMemoryless == GrMemoryless::kNo);

    auto mtlFormat = (MTLPixelFormat)format.asMtlFormat();
    SkASSERT(mtlFormat != MTLPixelFormatInvalid);
    SkASSERT(!GrMtlFormatIsCompressed(mtlFormat));
    SkASSERT(this->mtlCaps().isFormatRenderable(mtlFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrMtlAttachment::MakeMSAA(this, dimensions, numSamples, mtlFormat);
}
543
// Creates a (possibly renderable) uncompressed texture, optionally clearing the levels
// named by |levelClearMask|. Returns nullptr for protected requests or creation failure.
sk_sp<GrTexture> GrMtlGpu::onCreateTexture(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           GrRenderable renderable,
                                           int renderTargetSampleCnt,
                                           SkBudgeted budgeted,
                                           GrProtected isProtected,
                                           int mipLevelCount,
                                           uint32_t levelClearMask) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }
    SkASSERT(mipLevelCount > 0);

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(mtlPixelFormat != MTLPixelFormatInvalid);
    SkASSERT(!this->caps()->isFormatCompressed(format));

    // No level data is uploaded here, so a mipmapped texture starts out dirty.
    GrMipmapStatus mipmapStatus = (mipLevelCount > 1) ? GrMipmapStatus::kDirty
                                                      : GrMipmapStatus::kNotAllocated;

    sk_sp<GrMtlTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, renderTargetSampleCnt, mtlPixelFormat, mipLevelCount,
                mipmapStatus);
    } else {
        tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                           mipLevelCount, mipmapStatus);
    }
    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        this->clearTexture(tex.get(), GrMtlFormatBytesPerBlock(mtlPixelFormat), levelClearMask);
    }

    return std::move(tex);
}
584
// Creates a compressed texture and uploads |data| — all mip levels packed contiguously — via
// a single staging-buffer slice and one blit per level. Returns nullptr on any failure.
sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(SkISize dimensions,
                                                     const GrBackendFormat& format,
                                                     SkBudgeted budgeted,
                                                     GrMipmapped mipMapped,
                                                     GrProtected isProtected,
                                                     const void* data, size_t dataSize) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }

    SkASSERT(this->caps()->isFormatTexturable(format, GrTextureType::k2D));
    SkASSERT(data);

    if (!check_max_blit_width(dimensions.width())) {
        return nullptr;
    }

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(this->caps()->isFormatCompressed(format));

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    // All level data is provided up front, so mipmaps are valid from creation.
    GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes)
                                                                ? GrMipmapStatus::kValid
                                                                : GrMipmapStatus::kNotAllocated;

    auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                            numMipLevels, mipmapStatus);
    if (!tex) {
        return nullptr;
    }

    // Upload to texture
    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);

    auto compressionType = GrBackendFormatToCompressionType(format);
    SkASSERT(compressionType != SkImage::CompressionType::kNone);

    // Compute each level's offset within the packed data (total size checked in debug only).
    SkTArray<size_t> individualMipOffsets(numMipLevels);
    SkDEBUGCODE(size_t combinedBufferSize =) SkCompressedDataSize(compressionType, dimensions,
                                                                  &individualMipOffsets,
                                                                  mipMapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets.count() == numMipLevels);
    SkASSERT(dataSize == combinedBufferSize);

    // offset value must be a multiple of the destination texture's pixel size in bytes
    // for compressed textures, this is the block size
    size_t alignment = SkCompressedBlockSize(compressionType);
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            dataSize, alignment);
    if (!slice.fBuffer) {
        return nullptr;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return nullptr;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onCreateCompressedTexture"];
#endif

    // copy data into the buffer, skipping any trailing bytes
    memcpy(bufferData, data, dataSize);

    SkISize levelDimensions = dimensions;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        const size_t levelRowBytes = GrCompressedRowBytes(compressionType, levelDimensions.width());
        size_t levelSize = SkCompressedDataSize(compressionType, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(), 1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() /2),
                           std::max(1, levelDimensions.height()/2)};
    }
#ifdef SK_BUILD_FOR_MAC
    // On macOS the staging buffer may use managed storage; flush the CPU-side memcpy above.
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, dataSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return std::move(tex);
}
691
// TODO: Extra retain/release can't be avoided here because of getMtlTextureInfo copying the
// sk_cfp. It would be useful to have a (possibly-internal-only?) API to get the raw pointer.
static id<MTLTexture> get_texture_from_backend(const GrBackendTexture& backendTex) {
    GrMtlTextureInfo info;
    if (backendTex.getMtlTextureInfo(&info)) {
        return GrGetMTLTexture(info.fTexture.get());
    }
    return nil;
}
701
// Overload of the above for backend render targets.
static id<MTLTexture> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
    GrMtlTextureInfo info;
    if (backendRT.getMtlTextureInfo(&info)) {
        return GrGetMTLTexture(info.fTexture.get());
    }
    return nil;
}
709
// Wraps a client-supplied backend texture. Returns nullptr when the texture is missing or
// multisampled (sampling an MSAA texture in shaders is not currently supported).
sk_sp<GrTexture> GrMtlGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                                GrWrapOwnership,
                                                GrWrapCacheable cacheable,
                                                GrIOType ioType) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture || mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            ioType);
}
726
// Wraps a client-supplied compressed backend texture as read-only. Returns nullptr when the
// texture is missing or multisampled (sampling an MSAA texture in shaders is unsupported).
sk_sp<GrTexture> GrMtlGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture || mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            kRead_GrIOType);
}
742
// Wraps a client-created Metal texture as a renderable GrTexture. Fails if the
// backend object has no texture, the texture is itself MSAA, or its format is
// not renderable at the requested sample count.
sk_sp<GrTexture> GrMtlGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                          int sampleCnt,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from a MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    const GrMtlCaps& caps = this->mtlCaps();

    MTLPixelFormat format = mtlTexture.pixelFormat;
    if (!caps.isFormatRenderable(format, sampleCnt)) {
        return nullptr;
    }

    // The client is responsible for creating the texture with render-target usage.
    if (@available(macOS 10.11, iOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    // Map the requested sample count to one the caps actually support for this
    // format; renderability was verified above, so this must be non-zero.
    sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, format);
    SkASSERT(sampleCnt);

    return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, backendTex.dimensions(), sampleCnt, mtlTexture, cacheable);
}
773
// Wraps a client-created Metal texture as a GrRenderTarget. Requires a format
// that is renderable at the backend object's sample count.
sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    if (!this->caps()->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    id<MTLTexture> mtlTexture = get_texture_from_backend(backendRT);
    if (!mtlTexture) {
        return nullptr;
    }

    // The client is responsible for creating the texture with render-target usage.
    if (@available(macOS 10.11, iOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendRT.dimensions(),
                                                      backendRT.sampleCnt(), mtlTexture);
}
791
// Regenerates the full mip chain of `texture` from its base level using the
// blit encoder's generateMipmapsForTexture. Returns false if the format is not
// eligible (caps report it non-renderable, with an RGBA8 exception) or no blit
// encoder is available.
bool GrMtlGpu::onRegenerateMipMapLevels(GrTexture* texture) {
    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();

    // Automatic mipmap generation is only supported by color-renderable formats
    if (!fMtlCaps->isFormatRenderable(mtlTexture.pixelFormat, 1) &&
        // We have pixel configs marked as textureable-only that use RGBA8 as the internal format
        MTLPixelFormatRGBA8Unorm != mtlTexture.pixelFormat) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
    [blitCmdEncoder generateMipmapsForTexture: mtlTexture];
    // Keep the texture's attachment alive until the command buffer executes.
    this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(grMtlTexture->attachment()));

    return true;
}
813
// Used to "clear" a backend texture to a constant color by transferring.
// Maps a MTLPixelFormat to the GrColorType used when filling the staging
// buffer; returns kUnknown for formats this path cannot clear.
static GrColorType mtl_format_to_backend_tex_clear_colortype(MTLPixelFormat format) {
    switch(format) {
        case MTLPixelFormatA8Unorm:         return GrColorType::kAlpha_8;
        case MTLPixelFormatR8Unorm:         return GrColorType::kR_8;

#ifdef SK_BUILD_FOR_IOS
        case MTLPixelFormatB5G6R5Unorm:     return GrColorType::kBGR_565;
        case MTLPixelFormatABGR4Unorm:      return GrColorType::kABGR_4444;
#endif
        case MTLPixelFormatRGBA8Unorm:      return GrColorType::kRGBA_8888;
        case MTLPixelFormatRGBA8Unorm_sRGB: return GrColorType::kRGBA_8888_SRGB;

        case MTLPixelFormatRG8Unorm:        return GrColorType::kRG_88;
        case MTLPixelFormatBGRA8Unorm:      return GrColorType::kBGRA_8888;
        case MTLPixelFormatRGB10A2Unorm:    return GrColorType::kRGBA_1010102;
#ifdef SK_BUILD_FOR_MAC
        case MTLPixelFormatBGR10A2Unorm:    return GrColorType::kBGRA_1010102;
#endif
        case MTLPixelFormatR16Float:        return GrColorType::kR_F16;
        case MTLPixelFormatRGBA16Float:     return GrColorType::kRGBA_F16;
        case MTLPixelFormatR16Unorm:        return GrColorType::kR_16;
        case MTLPixelFormatRG16Unorm:       return GrColorType::kRG_1616;
        case MTLPixelFormatRGBA16Unorm:     return GrColorType::kRGBA_16161616;
        case MTLPixelFormatRG16Float:       return GrColorType::kRG_F16;
        default:                            return GrColorType::kUnknown;
    }
    // Every path above returns; the old trailing SkUNREACHABLE was dead code
    // and has been removed.
}
844
// Tightly packs each mip level of `srcData` into `dst` at the offsets given by
// `individualMipOffsets`. Rows are written with tight row bytes
// (width * bytesPerPixel); `bufferSize` is used only to assert the destination
// is large enough. Marked static: this is a file-local helper and should not
// have external linkage.
static void copy_src_data(char* dst,
                          size_t bytesPerPixel,
                          const SkTArray<size_t>& individualMipOffsets,
                          const GrPixmap srcData[],
                          int numMipLevels,
                          size_t bufferSize) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(individualMipOffsets.count() == numMipLevels);

    for (int level = 0; level < numMipLevels; ++level) {
        // Tight destination row bytes for this level's width.
        const size_t trimRB = srcData[level].width() * bytesPerPixel;
        SkASSERT(individualMipOffsets[level] + trimRB * srcData[level].height() <= bufferSize);
        // Copy row-by-row, dropping any source row padding.
        SkRectMemcpy(dst + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
}
862
// Creates a MTLTexture for a client-held backend surface and stores it in
// *info. Returns false if the format is unsupported for the requested usage,
// the width exceeds the blit limit, or Metal fails to create the texture.
bool GrMtlGpu::createMtlTextureForBackendSurface(MTLPixelFormat mtlFormat,
                                                 SkISize dimensions,
                                                 int sampleCnt,
                                                 GrTexturable texturable,
                                                 GrRenderable renderable,
                                                 GrMipmapped mipMapped,
                                                 GrMtlTextureInfo* info) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (texturable == GrTexturable::kYes && !fMtlCaps->isFormatTexturable(mtlFormat)) {
        return false;
    }
    if (renderable == GrRenderable::kYes && !fMtlCaps->isFormatRenderable(mtlFormat, 1)) {
        return false;
    }

    if (!check_max_blit_width(dimensions.width())) {
        return false;
    }

    auto desc = [[MTLTextureDescriptor alloc] init];
    desc.pixelFormat = mtlFormat;
    desc.width = dimensions.width();
    desc.height = dimensions.height();
    if (mipMapped == GrMipmapped::kYes) {
        // Full mip chain down to 1x1.
        desc.mipmapLevelCount = 1 + SkPrevLog2(std::max(dimensions.width(), dimensions.height()));
    }
    if (@available(macOS 10.11, iOS 9.0, *)) {
        desc.storageMode = MTLStorageModePrivate;
        MTLTextureUsage usage = texturable == GrTexturable::kYes ? MTLTextureUsageShaderRead : 0;
        usage |= renderable == GrRenderable::kYes ? MTLTextureUsageRenderTarget : 0;
        desc.usage = usage;
    }
    if (sampleCnt != 1) {
        desc.sampleCount = sampleCnt;
        desc.textureType = MTLTextureType2DMultisample;
    }
    id<MTLTexture> testTexture = [fDevice newTextureWithDescriptor: desc];
    // newTextureWithDescriptor: returns nil on failure (e.g. resource
    // exhaustion); don't report success while handing back an empty info.
    if (!testTexture) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    testTexture.label = @"testTexture";
#endif
    info->fTexture.reset(GrRetainPtrFromId(testTexture));
    return true;
}
907
// Creates a single-sample, texturable (optionally renderable/mipmapped) Metal
// texture for a client-held GrBackendTexture. Returns an invalid handle on
// failure.
GrBackendTexture GrMtlGpu::onCreateBackendTexture(SkISize dimensions,
                                                  const GrBackendFormat& format,
                                                  GrRenderable renderable,
                                                  GrMipmapped mipMapped,
                                                  GrProtected isProtected) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    bool created = this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1,
                                                           GrTexturable::kYes, renderable,
                                                           mipMapped, &info);
    if (!created) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info);
}
924
// "Clears" a backend texture to a constant color: fills a staging buffer with
// the color once (tight row bytes) and blits it into every mip level. Returns
// false if the format has no supported clear color type, staging space can't
// be allocated, or no blit encoder is available. `finishedCallback` (if any)
// is invoked once the GPU work completes.
bool GrMtlGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                     sk_sp<GrRefCntedCallback> finishedCallback,
                                     std::array<float, 4> color) {
    GrMtlTextureInfo info;
    SkAssertResult(backendTexture.getMtlTextureInfo(&info));

    id<MTLTexture> GR_NORETAIN mtlTexture = GrGetMTLTexture(info.fTexture.get());

    const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;

    // Create a transfer buffer and fill with data.
    size_t bytesPerPixel = GrMtlFormatBytesPerBlock(mtlFormat);
    size_t combinedBufferSize;

    // Reuse the same buffer for all levels. Should be ok since we made the row bytes tight.
    combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();

    size_t alignment = std::max(bytesPerPixel, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    auto colorType = mtl_format_to_backend_tex_clear_colortype(mtlFormat);
    if (colorType == GrColorType::kUnknown) {
        return false;
    }
    GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
    auto rb = ii.minRowBytes();
    SkASSERT(rb == bytesPerPixel*backendTexture.width());
    if (!GrClearImage(ii, buffer, rb, color)) {
        return false;
    }

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onClearBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    int numMipLevels = mtlTexture.mipmapLevelCount;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes;
        size_t levelSize;

        levelRowBytes = levelDimensions.width() * bytesPerPixel;
        levelSize = levelRowBytes * levelDimensions.height();

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        // Managed buffers on Mac must be told about CPU-side writes.
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    // Balance the guarded pushDebugGroup above. The pop was previously
    // unconditional, which is unbalanced (a Metal API validation error) in
    // builds without SK_ENABLE_MTL_DEBUG_INFO.
    [blitCmdEncoder popDebugGroup];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}
1012
// Creates a compressed Metal texture for a client-held GrBackendTexture.
// Compressed textures are never renderable. Returns an invalid handle on
// failure.
GrBackendTexture GrMtlGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
        GrProtected isProtected) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    bool created = this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1,
                                                           GrTexturable::kYes, GrRenderable::kNo,
                                                           mipMapped, &info);
    if (!created) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info);
}
1026
// Uploads pre-compressed pixel data (all mip levels, tightly packed) into a
// compressed backend texture via a staging buffer. `size` must cover the data
// for every level implied by the texture's mip count. Returns false if staging
// space or a blit encoder can't be obtained. `finishedCallback` (if any) fires
// when the GPU work completes.
bool GrMtlGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                sk_sp<GrRefCntedCallback> finishedCallback,
                                                const void* data,
                                                size_t size) {
    GrMtlTextureInfo info;
    SkAssertResult(backendTexture.getMtlTextureInfo(&info));

    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());

    int numMipLevels = mtlTexture.mipmapLevelCount;
    GrMipmapped mipMapped = numMipLevels > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;

    SkImage::CompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
    SkASSERT(compression != SkImage::CompressionType::kNone);

    // Create a transfer buffer and fill with data.
    SkSTArray<16, size_t> individualMipOffsets;
    size_t combinedBufferSize;
    combinedBufferSize = SkCompressedDataSize(compression,
                                              backendTexture.dimensions(),
                                              &individualMipOffsets,
                                              mipMapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets.count() == numMipLevels);

    size_t alignment = std::max(SkCompressedBlockSize(compression),
                                this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    memcpy(buffer, data, size);

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onUpdateCompressedBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes;
        size_t levelSize;

        levelRowBytes = GrCompressedRowBytes(compression, levelDimensions.width());
        levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        // Managed buffers on Mac must be told about CPU-side writes.
        [mtlBuffer->mtlBuffer() didModifyRange:NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    // Balance the guarded pushDebugGroup above. The pop was previously
    // unconditional, which is unbalanced (a Metal API validation error) in
    // builds without SK_ENABLE_MTL_DEBUG_INFO.
    [blitCmdEncoder popDebugGroup];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}
1113
// No explicit work needed: the wrapped MTLTexture is released when the
// GrBackendTexture's GrMtlTextureInfo is destroyed.
void GrMtlGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(tex.backend() == GrBackendApi::kMetal);
}
1118
// Compiles (or finds) the pipeline for (desc, programInfo). Returns true only
// when new compilation work was actually performed — a cache hit returns false.
bool GrMtlGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;

    auto pipelineState =
            this->resourceProvider().findOrCreateCompatiblePipelineState(desc, programInfo, &stat);
    if (!pipelineState) {
        return false;
    }

    return GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit != stat;
}
1131
// Hands a cached (key, data) shader pair to the resource provider for
// ahead-of-use compilation.
bool GrMtlGpu::precompileShader(const SkData& key, const SkData& data) {
    auto& provider = this->resourceProvider();
    return provider.precompileShader(key, data);
}
1135
1136#if GR_TEST_UTILS
// Test-only query: returns true if the backend object wraps a Metal texture
// that has shader-read usage (i.e. can actually be sampled).
bool GrMtlGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kMetal == tex.backend());

    GrMtlTextureInfo info;
    if (!tex.getMtlTextureInfo(&info)) {
        return false;
    }
    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
    if (!mtlTexture) {
        return false;
    }
    // The usage property is only available on macOS 10.11+/iOS 9+.
    if (@available(macOS 10.11, iOS 9.0, *)) {
        return mtlTexture.usage & MTLTextureUsageShaderRead;
    } else {
        return true; // best we can do
    }
}
1154
// Test-only: creates a renderable (non-texturable) backend render target.
// Fails for oversized dimensions, protected requests (unsupported here), or
// formats that can't render at the requested sample count.
GrBackendRenderTarget GrMtlGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                     GrColorType ct,
                                                                     int sampleCnt,
                                                                     GrProtected isProtected) {
    if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }
    if (isProtected == GrProtected::kYes) {
        return {};
    }

    MTLPixelFormat format = this->mtlCaps().getFormatFromColorType(ct);
    // Map the requested sample count to a supported one; 0 means unsupported.
    sampleCnt = this->mtlCaps().getRenderTargetSampleCount(sampleCnt, format);
    if (sampleCnt == 0) {
        return {};
    }

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(format, dimensions, sampleCnt, GrTexturable::kNo,
                                                 GrRenderable::kYes, GrMipmapped::kNo, &info)) {
        return {};
    }

    return GrBackendRenderTarget(dimensions.width(), dimensions.height(), info);
}
1181
// Test-only: flushes any outstanding GPU work that may still reference the
// render target. The underlying texture itself is released when the
// GrBackendRenderTarget is destroyed.
void GrMtlGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(rt.backend() == GrBackendApi::kMetal);

    GrMtlTextureInfo info;
    if (rt.getMtlTextureInfo(&info)) {
        this->submitToGpu(true);
    }
}
1192#endif // GR_TEST_UTILS
1193
// Copies src into dst by resolving src's MSAA color attachment. src must be a
// render target; dst may be a render target or a plain texture.
void GrMtlGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src) {
    // TODO: Add support for subrectangles
    GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrMtlAttachment* dstAttachment;
    if (dstRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
        dstAttachment = mtlRT->colorAttachment();
    } else {
        // Not a render target, so it must at least be a texture.
        SkASSERT(dst->asTexture());
        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
    }

    this->resolve(dstAttachment, srcRT->colorAttachment());
}
1209
// Copies srcRect from srcAttachment to dstPoint in dstAttachment using a blit
// encoder. The caller must have verified the copy is legal via
// GrMtlCaps::canCopyAsBlit. Both surfaces are retained on the command buffer
// until the GPU finishes.
void GrMtlGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src,
                                 GrMtlAttachment* dstAttachment, GrMtlAttachment* srcAttachment,
                                 const SkIRect& srcRect, const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    // Fixed: the src sample-count slot previously passed dstAttachment's sample
    // count (copy/paste error); it must be the src attachment's (compare the
    // call in onCopySurface).
    SkASSERT(this->mtlCaps().canCopyAsBlit(dstAttachment->mtlFormat(), dstAttachment->numSamples(),
                                           srcAttachment->mtlFormat(), srcAttachment->numSamples(),
                                           srcRect, dstPoint, dst == src));
#endif
    id<MTLTexture> GR_NORETAIN dstTex = dstAttachment->mtlTexture();
    id<MTLTexture> GR_NORETAIN srcTex = srcAttachment->mtlTexture();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"copySurfaceAsBlit"];
#endif
    [blitCmdEncoder copyFromTexture: srcTex
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
                         sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
                          toTexture: dstTex
                   destinationSlice: 0
                   destinationLevel: 0
                  destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(src));
}
1244
// Copies srcRect of src to dstPoint in dst, preferring an MSAA resolve when the
// caps allow it, otherwise a texture-to-texture blit. Returns false when
// neither path is possible.
bool GrMtlGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                             const SkIPoint& dstPoint) {
    SkASSERT(!src->isProtected() && !dst->isProtected());

    // Pick the attachment that actually holds the pixels for each surface:
    // resolve attachment for discardable-MSAA RTs, color attachment otherwise,
    // a texture's attachment, or the surface itself if it is already an
    // attachment.
    GrMtlAttachment* dstAttachment;
    GrMtlAttachment* srcAttachment;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
        // This will technically return true for single sample rts that used DMSAA in which case we
        // don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyways.
        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
            dstAttachment = mtlRT->resolveAttachment();
        } else {
            dstAttachment = mtlRT->colorAttachment();
        }
    } else if (dst->asTexture()) {
        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
    } else {
        // The surface in a GrAttachment already
        dstAttachment = static_cast<GrMtlAttachment*>(dst);
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(srcRT);
        // This will technically return true for single sample rts that used DMSAA in which case we
        // don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyways.
        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
            srcAttachment = mtlRT->resolveAttachment();
        } else {
            srcAttachment = mtlRT->colorAttachment();
        }
    } else if (src->asTexture()) {
        SkASSERT(src->asTexture());
        srcAttachment = static_cast<GrMtlTexture*>(src->asTexture())->attachment();
    } else {
        // The surface in a GrAttachment already
        srcAttachment = static_cast<GrMtlAttachment*>(src);
    }

    MTLPixelFormat dstFormat = dstAttachment->mtlFormat();
    MTLPixelFormat srcFormat = srcAttachment->mtlFormat();

    int dstSampleCnt = dstAttachment->sampleCount();
    int srcSampleCnt = srcAttachment->sampleCount();

    // Prefer a resolve when legal (handles MSAA src → single-sample dst).
    if (this->mtlCaps().canCopyAsResolve(dstFormat, dstSampleCnt,
                                         srcFormat, srcSampleCnt,
                                         SkToBool(srcRT), src->dimensions(),
                                         srcRect, dstPoint,
                                         dstAttachment == srcAttachment)) {
        this->copySurfaceAsResolve(dst, src);
        return true;
    }

    // Framebuffer-only attachments can't be used as blit sources/destinations.
    if (srcAttachment->framebufferOnly() || dstAttachment->framebufferOnly()) {
        return false;
    }

    if (this->mtlCaps().canCopyAsBlit(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt,
                                      srcRect, dstPoint, dstAttachment == srcAttachment)) {
        this->copySurfaceAsBlit(dst, src, dstAttachment, srcAttachment, srcRect, dstPoint);
        return true;
    }

    return false;
}
1314
// Writes CPU pixel data (one or more mip levels) into `surface`. Only texture
// surfaces are supported; delegates the actual staging-buffer upload to
// uploadToTexture.
bool GrMtlGpu::onWritePixels(GrSurface* surface,
                             SkIRect rect,
                             GrColorType surfaceColorType,
                             GrColorType srcColorType,
                             const GrMipLevel texels[],
                             int mipLevelCount,
                             bool prepForTexSampling) {
    GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(surface->asTexture());
    // TODO: In principle we should be able to support pure rendertargets as well, but
    // until we find a use case we'll only support texture rendertargets.
    if (!mtlTexture) {
        return false;
    }
    if (!mipLevelCount) {
        return false;
    }
#ifdef SK_DEBUG
    // Every supplied level must have pixel data.
    for (int i = 0; i < mipLevelCount; i++) {
        SkASSERT(texels[i].fPixels);
    }
#endif
    return this->uploadToTexture(mtlTexture, rect, srcColorType, texels, mipLevelCount);
}
1338
// Reads pixels from `surface` into `buffer`. Copies GPU→CPU through a
// transfer buffer, then forces a synchronous submit so the buffer contents are
// valid before the CPU-side memcpy. Color-type conversion is not supported.
bool GrMtlGpu::onReadPixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType dstColorType,
                            void* buffer,
                            size_t rowBytes) {
    SkASSERT(surface);

    if (surfaceColorType != dstColorType) {
        return false;
    }

    // Tightly-packed staging layout for the requested rect.
    int bpp = GrColorTypeBytesPerPixel(dstColorType);
    size_t transBufferRowBytes = bpp*rect.width();
    size_t transBufferImageBytes = transBufferRowBytes*rect.height();

    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferImageBytes, GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern);

    if (!transferBuffer) {
        return false;
    }

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    if (!this->readOrTransferPixels(surface,
                                    rect,
                                    dstColorType,
                                    grMtlBuffer->mtlBuffer(),
                                    0,
                                    transBufferImageBytes,
                                    transBufferRowBytes)) {
        return false;
    }
    // Block until the GPU copy into the transfer buffer has completed.
    this->submitCommandBuffer(kForce_SyncQueue);

    const void* mappedMemory = grMtlBuffer->mtlBuffer().contents;

    // Repack from the tight staging layout into the caller's rowBytes.
    SkRectMemcpy(buffer,
                 rowBytes,
                 mappedMemory,
                 transBufferRowBytes,
                 transBufferRowBytes,
                 rect.height());

    return true;
}
1387
// Uploads from a client transfer buffer into `texture` (base level only) via a
// blit. `offset` must be pixel-aligned and the buffer's color type must match
// the texture's byte-per-pixel size; no conversion is performed.
bool GrMtlGpu::onTransferPixelsTo(GrTexture* texture,
                                  SkIRect rect,
                                  GrColorType textureColorType,
                                  GrColorType bufferColorType,
                                  sk_sp<GrGpuBuffer> transferBuffer,
                                  size_t offset,
                                  size_t rowBytes) {
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    if (textureColorType != bufferColorType) {
        return false;
    }

    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();
    SkASSERT(mtlTexture);

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    id<MTLBuffer> GR_NORETAIN mtlBuffer = grMtlBuffer->mtlBuffer();
    SkASSERT(mtlBuffer);

    // Metal only supports offsets that are aligned to a pixel.
    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onTransferPixelsTo"];
#endif
    [blitCmdEncoder copyFromBuffer: mtlBuffer
                      sourceOffset: offset
                 sourceBytesPerRow: rowBytes
               sourceBytesPerImage: rowBytes*rect.height()
                        sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
                         toTexture: mtlTexture
                  destinationSlice: 0
                  destinationLevel: 0
                 destinationOrigin: origin];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return true;
}
1442
// Downloads pixels from `surface` into a client transfer buffer (GPU→CPU).
// `offset` must be pixel-aligned and the buffer's color type must match the
// surface's byte-per-pixel size; no conversion is performed. The actual blit
// is done by readOrTransferPixels with a tightly-packed row layout.
bool GrMtlGpu::onTransferPixelsFrom(GrSurface* surface,
                                    SkIRect rect,
                                    GrColorType surfaceColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset) {
    SkASSERT(surface);
    SkASSERT(transferBuffer);

    if (surfaceColorType != bufferColorType) {
        return false;
    }

    // Metal only supports offsets that are aligned to a pixel.
    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (GrBackendFormatBytesPerPixel(surface->backendFormat()) != bpp) {
        return false;
    }

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());

    // Tight row layout for the requested rect.
    size_t transBufferRowBytes = bpp*rect.width();
    size_t transBufferImageBytes = transBufferRowBytes*rect.height();

    return this->readOrTransferPixels(surface,
                                      rect,
                                      bufferColorType,
                                      grMtlBuffer->mtlBuffer(),
                                      offset,
                                      transBufferImageBytes,
                                      transBufferRowBytes);
}
1478
// Records a blit that copies `rect` from `surface`'s backing MTLTexture into
// `transferBuffer` at `offset`, using the caller-specified row/image strides.
// Shared by readback and transfer-from paths. Returns false if the copy cannot
// be recorded (width too large, no usable texture, or no blit encoder).
bool GrMtlGpu::readOrTransferPixels(GrSurface* surface,
                                    SkIRect rect,
                                    GrColorType dstColorType,
                                    id<MTLBuffer> transferBuffer,
                                    size_t offset,
                                    size_t imageBytes,
                                    size_t rowBytes) {
    if (!check_max_blit_width(rect.width())) {
        return false;
    }

    // Pick the texture to copy from: for a multisampled RT we must read the
    // resolved texture; otherwise the color texture (or plain texture) is used.
    id<MTLTexture> mtlTexture;
    if (GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget())) {
        if (rt->numSamples() > 1) {
            SkASSERT(rt->requiresManualMSAAResolve());  // msaa-render-to-texture not yet supported.
            mtlTexture = rt->resolveMTLTexture();
        } else {
            SkASSERT(!rt->requiresManualMSAAResolve());
            mtlTexture = rt->colorMTLTexture();
        }
    } else if (GrMtlTexture* texture = static_cast<GrMtlTexture*>(surface->asTexture())) {
        mtlTexture = texture->mtlTexture();
    }
    // ARC zero-initializes the local, so falling through both branches leaves nil.
    if (!mtlTexture) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"readOrTransferPixels"];
#endif
    [blitCmdEncoder copyFromTexture: mtlTexture
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(rect.left(), rect.top(), 0)
                         sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
                           toBuffer: transferBuffer
                  destinationOffset: offset
             destinationBytesPerRow: rowBytes
           destinationBytesPerImage: imageBytes];
#ifdef SK_BUILD_FOR_MAC
    // On macOS, managed-storage buffers need an explicit synchronize so the
    // CPU sees the GPU writes after the command buffer completes.
    if (this->mtlCaps().isMac()) {
        // Sync GPU data back to the CPU
        [blitCmdEncoder synchronizeResource: transferBuffer];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return true;
}
1535
// Creates a fence tied to the current command buffer: a dispatch semaphore
// that the completion handler signals once the GPU finishes the buffer.
// Ownership of the semaphore transfers to the caller; deleteFence() releases it.
GrFence SK_WARN_UNUSED_RESULT GrMtlGpu::insertFence() {
    dispatch_semaphore_t fenceSemaphore = dispatch_semaphore_create(0);
    // The handler block retains the semaphore until the command buffer completes.
    this->commandBuffer()->addCompletedHandler(^(id<MTLCommandBuffer> commandBuffer) {
        dispatch_semaphore_signal(fenceSemaphore);
    });

    // __bridge_retained hands an owning reference out of ARC's control.
    return (GrFence) (__bridge_retained const void*) fenceSemaphore;
}
1548
// Non-blocking poll of a fence created by insertFence(). A zero timeout makes
// dispatch_semaphore_wait return immediately: 0 when the semaphore has been
// signaled (GPU work done), non-zero otherwise.
// NOTE(review): a successful wait consumes the signal, so a second waitFence
// on the same fence would report false — callers appear to poll until the
// first success only; confirm.
bool GrMtlGpu::waitFence(GrFence fence) {
    dispatch_semaphore_t fenceSemaphore = (__bridge dispatch_semaphore_t)(const void*) fence;
    return 0 == dispatch_semaphore_wait(fenceSemaphore, 0);
}
1557
// Releases the owning reference taken by insertFence()'s __bridge_retained.
void GrMtlGpu::deleteFence(GrFence fence) const {
    // In this case it's easier to release in CoreFoundation than depend on ARC
    CFRelease((const void*) fence);
}
1563
// Creates a fresh GrMtlSemaphore for GPU-GPU synchronization. The isOwned flag
// is unused since a newly created semaphore is always owned by this GrGpu.
// Requires semaphore support per the caps (asserted in debug builds).
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrMtlGpu::makeSemaphore(bool /*isOwned*/) {
    SkASSERT(this->caps()->semaphoreSupport());
    return GrMtlSemaphore::Make(this);
}
1568
// Adopts a client-provided backend semaphore, wrapping its Metal event handle
// and target value in a GrMtlSemaphore. Wrap type and ownership are ignored by
// the Metal backend.
std::unique_ptr<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                            GrSemaphoreWrapType /* wrapType */,
                                                            GrWrapOwnership /*ownership*/) {
    SkASSERT(this->caps()->semaphoreSupport());
    return GrMtlSemaphore::MakeWrapped(semaphore.mtlSemaphore(), semaphore.mtlValue());
}
1575
// Encodes a signal of the semaphore's event/value into the current command
// buffer. The event-signaling API needs macOS 10.14 / iOS 12; on older OS
// versions this is a no-op.
void GrMtlGpu::insertSemaphore(GrSemaphore* semaphore) {
    if (@available(macOS 10.14, iOS 12.0, *)) {
        SkASSERT(semaphore);
        auto* mtlSemaphore = static_cast<GrMtlSemaphore*>(semaphore);
        this->commandBuffer()->encodeSignalEvent(mtlSemaphore->event(), mtlSemaphore->value());
    }
}
1584
// Encodes a wait on the semaphore's event/value into the current command
// buffer. As with insertSemaphore(), this requires macOS 10.14 / iOS 12 and
// silently does nothing on older OS versions.
void GrMtlGpu::waitSemaphore(GrSemaphore* semaphore) {
    if (@available(macOS 10.14, iOS 12.0, *)) {
        SkASSERT(semaphore);
        auto* mtlSemaphore = static_cast<GrMtlSemaphore*>(semaphore);
        this->commandBuffer()->encodeWaitForEvent(mtlSemaphore->event(), mtlSemaphore->value());
    }
}
1593
// Resolves the MSAA color attachment of `target` into its resolve attachment,
// unless the render pass already performed a discardable-MSAA resolve.
// The dirty-rect parameter is unused; the whole attachment is resolved.
void GrMtlGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect&) {
    SkASSERT(target->numSamples() > 1);
    GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(target);

    if (rt->resolveAttachment() && this->mtlCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    // Reuse the already-downcast pointer rather than re-casting `target`.
    this->resolve(rt->resolveAttachment(), rt->colorAttachment());
}
1606
// Performs an MSAA resolve by encoding an empty render pass whose store
// action (MultisampleResolve) copies the MSAA attachment into the resolve
// texture.
void GrMtlGpu::resolve(GrMtlAttachment* resolveAttachment,
                       GrMtlAttachment* msaaAttachment) {
    MTLRenderPassDescriptor* renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
    MTLRenderPassColorAttachmentDescriptor* colorDesc = renderPassDesc.colorAttachments[0];
    colorDesc.texture = msaaAttachment->mtlTexture();
    colorDesc.resolveTexture = resolveAttachment->mtlTexture();
    // Load preserves the MSAA contents; the resolve happens at store time.
    colorDesc.loadAction = MTLLoadActionLoad;
    colorDesc.storeAction = MTLStoreActionMultisampleResolve;

    GrMtlRenderCommandEncoder* cmdEncoder =
            this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr, nullptr);
    if (cmdEncoder) {
        cmdEncoder->setLabel(@"resolveTexture");
        // Keep both attachments alive until the command buffer finishes.
        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(resolveAttachment));
        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(msaaAttachment));
    }
}
1624
// Initializes an MSAA attachment (`dst`) by drawing the contents of its
// resolve texture (`src`) into it with a dedicated load pipeline, returning
// the render command encoder so the caller can continue recording into the
// same pass. Returns nil if either attachment is missing/unusable or the
// encoder cannot be created.
GrMtlRenderCommandEncoder* GrMtlGpu::loadMSAAFromResolve(
        GrAttachment* dst, GrMtlAttachment* src, const SkIRect& srcRect,
        MTLRenderPassStencilAttachmentDescriptor* stencil) {
    if (!dst) {
        return nil;
    }
    // A framebuffer-only source cannot be sampled as a texture.
    if (!src || src->framebufferOnly()) {
        return nil;
    }

    GrMtlAttachment* mtlDst = static_cast<GrMtlAttachment*>(dst);

    // The load pipeline is keyed on dst format, sample count, and stencil format.
    MTLPixelFormat stencilFormat = stencil.texture.pixelFormat;
    auto renderPipeline = this->resourceProvider().findOrCreateMSAALoadPipeline(mtlDst->mtlFormat(),
                                                                                dst->numSamples(),
                                                                                stencilFormat);

    // Set up rendercommandencoder
    auto renderPassDesc = [MTLRenderPassDescriptor new];
    auto colorAttachment = renderPassDesc.colorAttachments[0];
    colorAttachment.texture = mtlDst->mtlTexture();
    colorAttachment.loadAction = MTLLoadActionDontCare;
    colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
    colorAttachment.resolveTexture = src->mtlTexture();

    renderPassDesc.stencilAttachment = stencil;

    // We know in this case that the preceding renderCommandEncoder will not be compatible.
    // Either it's using a different rendertarget, or we are reading from the resolve and
    // hence we need to let the previous resolve finish. So we create a new one without checking.
    auto renderCmdEncoder =
                this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr);
    if (!renderCmdEncoder) {
        return nullptr;
    }

    // Bind pipeline
    renderCmdEncoder->setRenderPipelineState(renderPipeline->mtlPipelineState());
    this->commandBuffer()->addResource(sk_ref_sp(renderPipeline));

    // Bind src as input texture
    renderCmdEncoder->setFragmentTexture(src->mtlTexture(), 0);
    // No sampler needed
    this->commandBuffer()->addGrSurface(sk_ref_sp<GrSurface>(src));

    // Scissor and viewport should default to size of color attachment

    // Update and bind uniform data
    int w = srcRect.width();
    int h = srcRect.height();

    // dst rect edges in NDC (-1 to 1)
    int dw = dst->width();
    int dh = dst->height();
    float dx0 = 2.f * srcRect.fLeft / dw - 1.f;
    float dx1 = 2.f * (srcRect.fLeft + w) / dw - 1.f;
    float dy0 = 2.f * srcRect.fTop / dh - 1.f;
    float dy1 = 2.f * (srcRect.fTop + h) / dh - 1.f;

    // Layout must match the load pipeline's uniform block: a scale/translate
    // transform, the dst size, and padding out to 32 bytes.
    struct {
        float posXform[4];
        int textureSize[2];
        int pad[2];
    } uniData = {{dx1 - dx0, dy1 - dy0, dx0, dy0}, {dw, dh}, {0, 0}};

    constexpr size_t uniformSize = 32;
    if (@available(macOS 10.11, iOS 8.3, *)) {
        // Fast path: inline the 32 bytes directly as push-constant-style data.
        SkASSERT(uniformSize <= this->caps()->maxPushConstantsSize());
        renderCmdEncoder->setVertexBytes(&uniData, uniformSize, 0);
    } else {
        // upload the data
        GrRingBuffer::Slice slice = this->uniformsRingBuffer()->suballocate(uniformSize);
        GrMtlBuffer* buffer = (GrMtlBuffer*) slice.fBuffer;
        char* destPtr = static_cast<char*>(slice.fBuffer->map()) + slice.fOffset;
        memcpy(destPtr, &uniData, uniformSize);

        renderCmdEncoder->setVertexBuffer(buffer->mtlBuffer(), slice.fOffset, 0);
    }

    // Full-screen quad as a 4-vertex triangle strip.
    renderCmdEncoder->drawPrimitives(MTLPrimitiveTypeTriangleStrip, (NSUInteger)0, (NSUInteger)4);

    return renderCmdEncoder;
}
1708
1709#if GR_TEST_UTILS
// Starts a Metal GPU capture scoped to our command queue, for debugging with
// Xcode's GPU tools. No-op if a capture is already running or the OS is too old.
void GrMtlGpu::testingOnly_startCapture() {
    if (@available(macOS 10.13, iOS 11.0, *)) {
        // TODO: add Metal 3 interface as well
        MTLCaptureManager* manager = [MTLCaptureManager sharedCaptureManager];
        if (manager.isCapturing) {
            return;  // a capture is already in flight
        }
        if (@available(macOS 10.15, iOS 13.0, *)) {
            // Modern descriptor-based capture API.
            MTLCaptureDescriptor* descriptor = [[MTLCaptureDescriptor alloc] init];
            descriptor.captureObject = fQueue;

            NSError* error;
            if (![manager startCaptureWithDescriptor: descriptor error: &error]) {
                NSLog(@"Failed to start capture, error %@", error);
            }
        } else {
            // Legacy queue-based capture API for older OS versions.
            [manager startCaptureWithCommandQueue: fQueue];
        }
    }
}
1731
// Stops an in-progress Metal GPU capture, if any. Safe to call when no
// capture is running.
void GrMtlGpu::testingOnly_endCapture() {
    if (@available(macOS 10.13, iOS 11.0, *)) {
        MTLCaptureManager* manager = [MTLCaptureManager sharedCaptureManager];
        if (manager.isCapturing) {
            [manager stopCapture];
        }
    }
}
1740#endif
1741
1742#ifdef SK_ENABLE_DUMP_GPU
1743#include "src/utils/SkJSONWriter.h"
// Dumps Metal device/queue capabilities as JSON for debugging. Every property
// read is gated by the availability version (and, where needed, the SDK
// version macro) that introduced it, so the dump degrades gracefully on
// older OS/SDK combinations.
void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
    // We are called by the base class, which has already called beginObject(). We choose to nest
    // all of our caps information in a named sub-object.
    writer->beginObject("Metal GPU");

    writer->beginObject("Device");
    writer->appendString("name", fDevice.name.UTF8String);
#ifdef SK_BUILD_FOR_MAC
    // macOS-only device topology properties.
    if (@available(macOS 10.11, *)) {
        writer->appendBool("isHeadless", fDevice.isHeadless);
        writer->appendBool("isLowPower", fDevice.isLowPower);
    }
    if (@available(macOS 10.13, *)) {
        writer->appendBool("isRemovable", fDevice.isRemovable);
    }
#endif
    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendU64("registryID", fDevice.registryID);
    }
#if defined(SK_BUILD_FOR_MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    // Physical location info (built-in vs. slot vs. eGPU), macOS 10.15+ SDK only.
    if (@available(macOS 10.15, *)) {
        switch (fDevice.location) {
            case MTLDeviceLocationBuiltIn:
                writer->appendString("location", "builtIn");
                break;
            case MTLDeviceLocationSlot:
                writer->appendString("location", "slot");
                break;
            case MTLDeviceLocationExternal:
                writer->appendString("location", "external");
                break;
            case MTLDeviceLocationUnspecified:
                writer->appendString("location", "unspecified");
                break;
            default:
                writer->appendString("location", "unknown");
                break;
        }
        writer->appendU64("locationNumber", fDevice.locationNumber);
        writer->appendU64("maxTransferRate", fDevice.maxTransferRate);
    }
#endif  // SK_BUILD_FOR_MAC
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500 || __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000
    if (@available(macOS 10.15, iOS 13.0, *)) {
        writer->appendBool("hasUnifiedMemory", fDevice.hasUnifiedMemory);
    }
#endif
#ifdef SK_BUILD_FOR_MAC
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    // Peer-group info for multi-GPU configurations.
    if (@available(macOS 10.15, *)) {
        writer->appendU64("peerGroupID", fDevice.peerGroupID);
        writer->appendU32("peerCount", fDevice.peerCount);
        writer->appendU32("peerIndex", fDevice.peerIndex);
    }
#endif
    if (@available(macOS 10.12, *)) {
        writer->appendU64("recommendedMaxWorkingSetSize", fDevice.recommendedMaxWorkingSetSize);
    }
#endif  // SK_BUILD_FOR_MAC
    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendU64("currentAllocatedSize", fDevice.currentAllocatedSize);
        writer->appendU64("maxThreadgroupMemoryLength", fDevice.maxThreadgroupMemoryLength);
    }

    if (@available(macOS 10.11, iOS 9.0, *)) {
        writer->beginObject("maxThreadsPerThreadgroup");
        writer->appendU64("width", fDevice.maxThreadsPerThreadgroup.width);
        writer->appendU64("height", fDevice.maxThreadsPerThreadgroup.height);
        writer->appendU64("depth", fDevice.maxThreadsPerThreadgroup.depth);
        writer->endObject();
    }

    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendBool("areProgrammableSamplePositionsSupported",
                           fDevice.areProgrammableSamplePositionsSupported);
        writer->appendBool("areRasterOrderGroupsSupported",
                           fDevice.areRasterOrderGroupsSupported);
    }
#ifdef SK_BUILD_FOR_MAC
    if (@available(macOS 10.11, *)) {
        writer->appendBool("isDepth24Stencil8PixelFormatSupported",
                           fDevice.isDepth24Stencil8PixelFormatSupported);

    }
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    if (@available(macOS 10.15, *)) {
        writer->appendBool("areBarycentricCoordsSupported",
                           fDevice.areBarycentricCoordsSupported);
        writer->appendBool("supportsShaderBarycentricCoordinates",
                           fDevice.supportsShaderBarycentricCoordinates);
    }
#endif
#endif  // SK_BUILD_FOR_MAC
    if (@available(macOS 10.14, iOS 12.0, *)) {
        writer->appendU64("maxBufferLength", fDevice.maxBufferLength);
    }
    // Feature-tier enums are dumped as human-readable strings.
    if (@available(macOS 10.13, iOS 11.0, *)) {
        switch (fDevice.readWriteTextureSupport) {
            case MTLReadWriteTextureTier1:
                writer->appendString("readWriteTextureSupport", "tier1");
                break;
            case MTLReadWriteTextureTier2:
                writer->appendString("readWriteTextureSupport", "tier2");
                break;
            case MTLReadWriteTextureTierNone:
                writer->appendString("readWriteTextureSupport", "tierNone");
                break;
            default:
                writer->appendString("readWriteTextureSupport", "unknown");
                break;
        }
        switch (fDevice.argumentBuffersSupport) {
            case MTLArgumentBuffersTier1:
                writer->appendString("argumentBuffersSupport", "tier1");
                break;
            case MTLArgumentBuffersTier2:
                writer->appendString("argumentBuffersSupport", "tier2");
                break;
            default:
                writer->appendString("argumentBuffersSupport", "unknown");
                break;
        }
    }
    if (@available(macOS 10.14, iOS 12.0, *)) {
        writer->appendU64("maxArgumentBufferSamplerCount", fDevice.maxArgumentBufferSamplerCount);
    }
#ifdef SK_BUILD_FOR_IOS
    if (@available(iOS 13.0, *)) {
        writer->appendU64("sparseTileSizeInBytes", fDevice.sparseTileSizeInBytes);
    }
#endif
    writer->endObject();

    writer->appendString("queue", fQueue.label.UTF8String);
    writer->appendBool("disconnected", fDisconnected);

    writer->endObject();
}
1882#endif
1883
1884GR_NORETAIN_END
1885