• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1/*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/gpu/mtl/GrMtlGpu.h"
9
10#include "include/private/GrTypesPriv.h"
11#include "src/core/SkCompressedDataUtils.h"
12#include "src/core/SkConvertPixels.h"
13#include "src/core/SkMipmap.h"
14#include "src/gpu/GrBackendUtils.h"
15#include "src/gpu/GrDataUtils.h"
16#include "src/gpu/GrDirectContextPriv.h"
17#include "src/gpu/GrRenderTarget.h"
18#include "src/gpu/GrTexture.h"
19#include "src/gpu/GrThreadSafePipelineBuilder.h"
20#include "src/gpu/mtl/GrMtlBuffer.h"
21#include "src/gpu/mtl/GrMtlCommandBuffer.h"
22#include "src/gpu/mtl/GrMtlOpsRenderPass.h"
23#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
24#include "src/gpu/mtl/GrMtlSemaphore.h"
25#include "src/gpu/mtl/GrMtlTexture.h"
26#include "src/gpu/mtl/GrMtlTextureRenderTarget.h"
27#include "src/gpu/mtl/GrMtlUtil.h"
28
29#import <simd/simd.h>
30
31#if !__has_feature(objc_arc)
32#error This file must be compiled with Arc. Use -fobjc-arc flag
33#endif
34
35GR_NORETAIN_BEGIN
36
// Selects the newest MTLFeatureSet the device supports, probing from the most capable
// GPU family/version down to the least capable. Writes the chosen set to *featureSet
// and returns true; returns false if the device supports none of the known sets.
// Feature sets introduced in newer OS releases are guarded with @available so this
// compiles and runs against older deployment targets.
static bool get_feature_set(id<MTLDevice> device, MTLFeatureSet* featureSet) {
    // Mac OSX
#ifdef SK_BUILD_FOR_MAC
    if (@available(macOS 10.12, *)) {
        if ([device supportsFeatureSet:MTLFeatureSet_OSX_GPUFamily1_v2]) {
            *featureSet = MTLFeatureSet_OSX_GPUFamily1_v2;
            return true;
        }
    }
    if ([device supportsFeatureSet:MTLFeatureSet_OSX_GPUFamily1_v1]) {
        *featureSet = MTLFeatureSet_OSX_GPUFamily1_v1;
        return true;
    }
#endif

    // iOS Family group 3
#ifdef SK_BUILD_FOR_IOS
    if (@available(iOS 10.0, *)) {
        if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2]) {
            *featureSet = MTLFeatureSet_iOS_GPUFamily3_v2;
            return true;
        }
    }
    if (@available(iOS 9.0, *)) {
        if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
            *featureSet = MTLFeatureSet_iOS_GPUFamily3_v1;
            return true;
        }
    }

    // iOS Family group 2
    if (@available(iOS 10.0, *)) {
        if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v3]) {
            *featureSet = MTLFeatureSet_iOS_GPUFamily2_v3;
            return true;
        }
    }
    if (@available(iOS 9.0, *)) {
        if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v2]) {
            *featureSet = MTLFeatureSet_iOS_GPUFamily2_v2;
            return true;
        }
    }
    if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
        *featureSet = MTLFeatureSet_iOS_GPUFamily2_v1;
        return true;
    }

    // iOS Family group 1
    if (@available(iOS 10.0, *)) {
        if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v3]) {
            *featureSet = MTLFeatureSet_iOS_GPUFamily1_v3;
            return true;
        }
    }
    if (@available(iOS 9.0, *)) {
        if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v2]) {
            *featureSet = MTLFeatureSet_iOS_GPUFamily1_v2;
            return true;
        }
    }
    if ([device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
        *featureSet = MTLFeatureSet_iOS_GPUFamily1_v1;
        return true;
    }
#endif
    // No supported feature sets were found
    return false;
}
106
// Creates the Metal GrGpu backend, or returns nullptr if the backend context is
// incomplete, the OS is older than the supported minimum, or no usable feature set
// is found on the device.
sk_sp<GrGpu> GrMtlGpu::Make(const GrMtlBackendContext& context, const GrContextOptions& options,
                            GrDirectContext* direct) {
    if (!context.fDevice || !context.fQueue) {
        return nullptr;
    }
    // @available must be the sole condition of the if, so the supported case is an
    // intentionally empty branch.
    if (@available(macOS 10.14, iOS 11.0, *)) {
        // no warning needed
    } else {
        SkDebugf("*** Error ***: Skia's Metal backend no longer supports this OS version.\n");
#ifdef SK_BUILD_FOR_IOS
        SkDebugf("Minimum supported version is iOS 11.0.\n");
#else
        SkDebugf("Minimum supported version is MacOS 10.14.\n");
#endif
        return nullptr;
    }

    // __bridge casts: no ownership transfer, the context keeps its references.
    id<MTLDevice> GR_NORETAIN mtlDevice = (__bridge id<MTLDevice>)(context.fDevice.get());
    id<MTLCommandQueue> GR_NORETAIN mtlQueue = (__bridge id<MTLCommandQueue>)(context.fQueue.get());

    MTLFeatureSet featureSet;
    if (!get_feature_set(mtlDevice, &featureSet)) {
        return nullptr;
    }
    return sk_sp<GrGpu>(new GrMtlGpu(direct, options, mtlDevice, mtlQueue,
                                     context.fBinaryArchive.get(), featureSet));
}
135
// This constant determines how many OutstandingCommandBuffers are allocated together as a block in
// the deque (fOutstandingCommandBuffers). As such it needs to balance allocating too much memory
// vs. incurring allocation/deallocation thrashing. It should roughly correspond to the max number
// of outstanding command buffers we expect to see.
static const int kDefaultOutstandingAllocCnt = 8;
141
// Constructs the backend around an existing device/queue. Caps are created from the
// detected feature set, and an initial command buffer is made eagerly; the binary
// archive is only adopted when the SDK and running OS support MTLBinaryArchive.
GrMtlGpu::GrMtlGpu(GrDirectContext* direct, const GrContextOptions& options,
                   id<MTLDevice> device, id<MTLCommandQueue> queue, GrMTLHandle binaryArchive,
                   MTLFeatureSet featureSet)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(queue)
        , fOutstandingCommandBuffers(sizeof(OutstandingCommandBuffer), kDefaultOutstandingAllocCnt)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false) {
    fMtlCaps.reset(new GrMtlCaps(options, fDevice, featureSet));
    this->initCapsAndCompiler(fMtlCaps);
    fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
#if GR_METAL_SDK_VERSION >= 230
    if (@available(macOS 11.0, iOS 14.0, *)) {
        // __bridge: no ownership transfer from the caller-supplied handle.
        fBinaryArchive = (__bridge id<MTLBinaryArchive>)(binaryArchive);
    }
#endif
}
161
GrMtlGpu::~GrMtlGpu() {
    // disconnect() may already have torn everything down; don't do it twice.
    if (fDisconnected) {
        return;
    }
    this->destroyResources();
}
167
void GrMtlGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);

    // Tear down at most once; the destructor checks the same flag.
    if (fDisconnected) {
        return;
    }
    this->destroyResources();
    fDisconnected = true;
}
176
// This backend does not provide a thread-safe pipeline builder.
GrThreadSafePipelineBuilder* GrMtlGpu::pipelineBuilder() {
    return nullptr;
}
180
// Ref-counted variant of pipelineBuilder(); likewise unsupported here.
sk_sp<GrThreadSafePipelineBuilder> GrMtlGpu::refPipelineBuilder() {
    return nullptr;
}
184
// Flushes all pending work synchronously, retires every tracked command buffer, and
// releases the backend's Metal objects. Safe to call exactly once (guarded by callers).
void GrMtlGpu::destroyResources() {
    this->submitCommandBuffer(SyncQueue::kForce_SyncQueue);

    // Entries in fOutstandingCommandBuffers were placement-newed, so each destructor
    // has to be invoked by hand before its storage is reclaimed.
    while (!fOutstandingCommandBuffers.empty()) {
        auto* oldest = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
        // Remove from the deque before destroying: destruction may kick off another submit.
        fOutstandingCommandBuffers.pop_front();
        oldest->~OutstandingCommandBuffer();
    }

    fStagingBufferManager.reset();

    fResourceProvider.destroyResources();

    fQueue = nil;
    fDevice = nil;
}
205
// Creates the render pass used to record draws into `renderTarget`. The MSAA-surface
// flag, attachment, sampled-proxy list, and barrier flags are accepted for interface
// compatibility but are not forwarded to GrMtlOpsRenderPass. Caller takes ownership
// of the returned object (deleted in submit()).
GrOpsRenderPass* GrMtlGpu::onGetOpsRenderPass(
            GrRenderTarget* renderTarget, bool /*useMSAASurface*/, GrAttachment*,
            GrSurfaceOrigin origin, const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) {
    return new GrMtlOpsRenderPass(this, renderTarget, origin, colorInfo, stencilInfo);
}
215
// Returns the active command buffer, lazily creating one — submission clears
// fCurrentCmdBuffer, so a fresh buffer is made the first time work is recorded
// after each submit.
GrMtlCommandBuffer* GrMtlGpu::commandBuffer() {
    if (fCurrentCmdBuffer) {
        return fCurrentCmdBuffer.get();
    }
    fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
    SkASSERT(fCurrentCmdBuffer);
    return fCurrentCmdBuffer.get();
}
225
// Hands `buffer` to the active command buffer so it stays alive until the GPU is done
// with it. A command buffer must already exist (asserted).
void GrMtlGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->addGrBuffer(std::move(buffer));
}
230
// Finalizes and submits a render pass created by onGetOpsRenderPass(), then deletes it.
void GrMtlGpu::submit(GrOpsRenderPass* renderPass) {
    // Every render pass this GrGpu hands out is a GrMtlOpsRenderPass, so a static
    // downcast is appropriate; unlike reinterpret_cast it stays correct even if the
    // hierarchy ever involves a non-trivial pointer adjustment.
    GrMtlOpsRenderPass* mtlRenderPass = static_cast<GrMtlOpsRenderPass*>(renderPass);
    mtlRenderPass->submit();
    delete renderPass;
}
236
// Commits the current command buffer (if it has recorded work) and records it in
// fOutstandingCommandBuffers for later retirement. kForce_SyncQueue additionally
// waits for the GPU. Returns false only if the commit itself fails.
bool GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
    if (!fCurrentCmdBuffer || !fCurrentCmdBuffer->hasWork()) {
        if (sync == SyncQueue::kForce_SyncQueue) {
            // Nothing new to submit, but a forced sync still drains prior submissions.
            this->finishOutstandingGpuWork();
            this->checkForFinishedCommandBuffers();
        }
        // We need to manually call the finishedCallbacks since we don't add this
        // to the OutstandingCommandBuffer list
        if (fCurrentCmdBuffer) {
            fCurrentCmdBuffer->callFinishedCallbacks();
        }
        return true;
    }

    SkASSERT(fCurrentCmdBuffer);
    // Placement-new into the deque; destroyed manually when the buffer is retired.
    new (fOutstandingCommandBuffers.push_back()) OutstandingCommandBuffer(fCurrentCmdBuffer);

    if (!fCurrentCmdBuffer->commit(sync == SyncQueue::kForce_SyncQueue)) {
        return false;
    }

    // We don't create a new command buffer here because we may end up using it
    // in the next frame, and that confuses the GPU debugger. Instead we
    // create when we next need one.
    fCurrentCmdBuffer = nullptr;

    // If the freeing of any resources held by a finished command buffer causes us to send
    // a new command to the gpu (like changing the resource state) we'll create the new
    // command buffer in commandBuffer(), above.
    this->checkForFinishedCommandBuffers();

    return true;
}
270
// Retires completed command buffers. The deque is ordered oldest (front) to newest,
// so we pop completed entries from the front and stop at the first one still in
// flight — everything behind it is guaranteed to be unfinished as well.
void GrMtlGpu::checkForFinishedCommandBuffers() {
    for (auto* front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
         front && (*front)->isCompleted();
         front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front()) {
        // Remove before destroying: destruction might try to kick off another submit.
        fOutstandingCommandBuffers.pop_front();
        // Placement new was used, so the destructor must be called explicitly.
        front->~OutstandingCommandBuffer();
    }
}
286
// Blocks until the most recently submitted command buffer completes; all earlier
// submissions are guaranteed to finish before it does.
void GrMtlGpu::finishOutstandingGpuWork() {
    if (auto* newest = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back()) {
        (*newest)->waitUntilCompleted();
    }
}
295
// Wraps a C-style finished proc/context pair in a ref-counted callback and registers it.
void GrMtlGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                               GrGpuFinishedContext finishedContext) {
    SkASSERT(finishedProc);
    auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
    this->addFinishedCallback(std::move(callback));
}
301
// Registers a callback to fire once all currently submitted GPU work has finished.
void GrMtlGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    // Besides the current commandbuffer, we also add the finishedCallback to the newest outstanding
    // commandbuffer. Our contract for calling the proc is that all previous submitted cmdbuffers
    // have finished when we call it. However, if our current command buffer has no work when it is
    // flushed it will drop its ref to the callback immediately. But the previous work may not have
    // finished. It is safe to only add the proc to the newest outstanding commandbuffer cause that
    // must finish after all previously submitted command buffers.
    OutstandingCommandBuffer* back = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (back) {
        (*back)->addFinishedCallback(finishedCallback);
    }
    commandBuffer()->addFinishedCallback(std::move(finishedCallback));
}
316
// Submits recorded work; with syncCpu the call blocks until the GPU has finished it.
bool GrMtlGpu::onSubmitToGpu(bool syncCpu) {
    SyncQueue sync = syncCpu ? kForce_SyncQueue : kSkip_SyncQueue;
    return this->submitCommandBuffer(sync);
}
324
// Flushes pending work so the texture's contents become visible to another context.
// No semaphore object is produced by this backend, hence the nullptr result.
std::unique_ptr<GrSemaphore> GrMtlGpu::prepareTextureForCrossContextUsage(GrTexture*) {
    this->submitToGpu(false);
    return nullptr;
}
329
// Creates a GPU buffer of `size` bytes; all parameters are forwarded to GrMtlBuffer::Make.
sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                                            GrAccessPattern accessPattern, const void* data) {
    return GrMtlBuffer::Make(this, size, type, accessPattern, data);
}
334
// Rejects widths beyond 32767 pixels, the maximum row width this code will blit.
static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels <= 32767) {
        return true;
    }
    SkASSERT(false); // surfaces should not be this wide anyway
    return false;
}
342
// Uploads pixel data for one or all mip levels of `tex`: the texels are packed tightly
// into a staging-buffer slice and then blitted level by level into the texture.
// Preconditions (asserted): the format is texturable and write-compatible with
// dataColorType, and a multi-level upload covers the full texture.
// Returns false on bad dimensions, gaps in the mip data, or staging-buffer failure.
bool GrMtlGpu::uploadToTexture(GrMtlTexture* tex, int left, int top, int width, int height,
                               GrColorType dataColorType, const GrMipLevel texels[],
                               int mipLevelCount) {
    SkASSERT(this->caps()->isFormatTexturable(tex->backendFormat()));
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == tex->width() && height == tex->height()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->maxMipmapLevel() + 1));

    if (!check_max_blit_width(width)) {
        return false;
    }
    if (width == 0 || height == 0) {
        return false;
    }

    SkASSERT(this->mtlCaps().surfaceSupportsWritePixels(tex));
    SkASSERT(this->mtlCaps().areColorTypeAndFormatCompatible(dataColorType, tex->backendFormat()));

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    SkASSERT(1 == mipLevelCount || mipLevelCount == (int)mtlTexture.mipmapLevelCount);

    if (1 == mipLevelCount && !texels[0].fPixels) {
        return true;   // no data to upload
    }

    for (int i = 0; i < mipLevelCount; ++i) {
        // We do not allow any gaps in the mip data
        if (!texels[i].fPixels) {
            return false;
        }
    }

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // Tightly packed offsets for each level within one combined staging allocation.
    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = GrComputeTightCombinedBufferSize(
            bpp, {width, height}, &individualMipOffsets, mipLevelCount);
    SkASSERT(combinedBufferSize);


    // offset value must be a multiple of the destination texture's pixel size in bytes
#ifdef SK_BUILD_FOR_MAC
    static const size_t kMinAlignment = 4;
#else
    static const size_t kMinAlignment = 1;
#endif
    size_t alignment = std::max(bpp, kMinAlignment);
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    int currentWidth = width;
    int currentHeight = height;
    int layerHeight = tex->height();
    MTLOrigin origin = MTLOriginMake(left, top, 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texels[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texels[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping any trailing bytes
            char* dst = bufferData + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texels[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            // Blit this level from the staging buffer into the texture.
            [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                              sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: trimRowBytes
                       sourceBytesPerImage: trimRowBytes*currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
        layerHeight = currentHeight;
    }
#ifdef SK_BUILD_FOR_MAC
    // Managed buffers on macOS require notifying Metal of CPU-side writes.
    [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
#endif

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}
446
// Zero-fills the mip levels of `tex` selected by `levelMask`. A single private transfer
// buffer big enough for all selected levels is cleared with a blit fill and then copied
// into each level. `bpp` must be a power of two (asserted).
bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->backendFormat()));

    if (!levelMask) {
        return true;
    }

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    int mipLevelCount = (int)mtlTexture.mipmapLevelCount;

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = 0;
    int currentWidth = tex->width();
    int currentHeight = tex->height();

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works with the assumption that the bytes in pixel config is always a power of 2.
    // TODO: can we just copy from a single buffer the size of the largest cleared level w/o a perf
    // penalty?
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
    // First pass: compute the aligned offset of each selected level and the total size.
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                // Round up to the next aligned offset.
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    SkASSERT(combinedBufferSize > 0 && !individualMipOffsets.empty());

    // TODO: Create GrMtlTransferBuffer
    NSUInteger options = 0;
    if (@available(macOS 10.11, iOS 9.0, *)) {
        options |= MTLResourceStorageModePrivate;
    }
    id<MTLBuffer> transferBuffer = [fDevice newBufferWithLength: combinedBufferSize
                                                        options: options];
    if (nil == transferBuffer) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    // clear the buffer to transparent black
    NSRange clearRange;
    clearRange.location = 0;
    clearRange.length = combinedBufferSize;
    [blitCmdEncoder fillBuffer: transferBuffer
                         range: clearRange
                         value: 0];

    // now copy buffer to texture
    currentWidth = tex->width();
    currentHeight = tex->height();
    MTLOrigin origin = MTLOriginMake(0, 0, 0);
    // Second pass: walk the same selected levels (offsets match the first pass).
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t rowBytes = currentWidth * bpp;

            [blitCmdEncoder copyFromBuffer: transferBuffer
                              sourceOffset: individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: rowBytes
                       sourceBytesPerImage: rowBytes * currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}
534
// Creates a stencil attachment. The color format is ignored: stencil attachments always
// use the backend's preferred stencil pixel format.
sk_sp<GrAttachment> GrMtlGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                    SkISize dimensions, int numStencilSamples) {
    MTLPixelFormat sFmt = this->mtlCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    // (Previously written as GrMtlAttachment::GrMtlAttachment::MakeStencil — the doubled
    // qualifier is the injected-class-name and was redundant.)
    return GrMtlAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}
542
// Creates a new 2D texture (optionally renderable). Protected textures are not supported
// by this backend. Levels selected in `levelClearMask` are zero-filled after creation.
sk_sp<GrTexture> GrMtlGpu::onCreateTexture(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           GrRenderable renderable,
                                           int renderTargetSampleCnt,
                                           SkBudgeted budgeted,
                                           GrProtected isProtected,
                                           int mipLevelCount,
                                           uint32_t levelClearMask) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }
    SkASSERT(mipLevelCount > 0);

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(mtlPixelFormat != MTLPixelFormatInvalid);
    SkASSERT(!this->caps()->isFormatCompressed(format));

    sk_sp<GrMtlTexture> tex;
    // This TexDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this TexDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
    texDesc.textureType = MTLTextureType2D;
    texDesc.pixelFormat = mtlPixelFormat;
    texDesc.width = dimensions.fWidth;
    texDesc.height = dimensions.fHeight;
    texDesc.depth = 1;
    texDesc.mipmapLevelCount = mipLevelCount;
    texDesc.sampleCount = 1;
    texDesc.arrayLength = 1;
    // Make all textures have private gpu only access. We can use transfer buffers or textures
    // to copy to them.
    if (@available(macOS 10.11, iOS 9.0, *)) {
        texDesc.storageMode = MTLStorageModePrivate;
        texDesc.usage = MTLTextureUsageShaderRead;
        texDesc.usage |= (renderable == GrRenderable::kYes) ? MTLTextureUsageRenderTarget : 0;
    }

    // Multi-level textures start out dirty; their upper levels have no contents yet.
    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
    if (renderable == GrRenderable::kYes) {
        tex = GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, renderTargetSampleCnt, texDesc, mipmapStatus);
    } else {
        tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, texDesc, mipmapStatus);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        this->clearTexture(tex.get(), GrMtlFormatBytesPerBlock(mtlPixelFormat), levelClearMask);
    }

    // std::move is required here to convert sk_sp<GrMtlTexture> to sk_sp<GrTexture>.
    return std::move(tex);
}
601
// Creates a compressed 2D texture and uploads `data` — tightly packed bytes covering
// all requested mip levels — through a staging buffer, one blit per level.
// Protected textures are not supported. Returns nullptr on any failure.
sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(SkISize dimensions,
                                                     const GrBackendFormat& format,
                                                     SkBudgeted budgeted,
                                                     GrMipmapped mipMapped,
                                                     GrProtected isProtected,
                                                     const void* data, size_t dataSize) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }

    SkASSERT(this->caps()->isFormatTexturable(format));
    SkASSERT(data);

    if (!check_max_blit_width(dimensions.width())) {
        return nullptr;
    }

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(this->caps()->isFormatCompressed(format));

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    // This TexDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this TexDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    // Compressed textures with MIP levels or multiple samples are not supported as of now.
    MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
    texDesc.textureType = MTLTextureType2D;
    texDesc.pixelFormat = mtlPixelFormat;
    texDesc.width = dimensions.width();
    texDesc.height = dimensions.height();
    texDesc.depth = 1;
    texDesc.mipmapLevelCount = numMipLevels;
    texDesc.sampleCount = 1;
    texDesc.arrayLength = 1;
    // Make all textures have private gpu only access. We can use transfer buffers or textures
    // to copy to them.
    if (@available(macOS 10.11, iOS 9.0, *)) {
        texDesc.storageMode = MTLStorageModePrivate;
        texDesc.usage = MTLTextureUsageShaderRead;
    }

    // The caller supplies data for every level, so mipmaps (if any) are valid on creation.
    GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes)
                                                                ? GrMipmapStatus::kValid
                                                                : GrMipmapStatus::kNotAllocated;

    auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, texDesc, mipmapStatus);
    if (!tex) {
        return nullptr;
    }

    // Upload to texture
    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);

    auto compressionType = GrBackendFormatToCompressionType(format);
    SkASSERT(compressionType != SkImage::CompressionType::kNone);

    SkTArray<size_t> individualMipOffsets(numMipLevels);
    // combinedBufferSize exists only in debug builds; it feeds the assert below.
    SkDEBUGCODE(size_t combinedBufferSize =) SkCompressedDataSize(compressionType, dimensions,
                                                                  &individualMipOffsets,
                                                                  mipMapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets.count() == numMipLevels);
    SkASSERT(dataSize == combinedBufferSize);

    // offset value must be a multiple of the destination texture's pixel size in bytes
    // for compressed textures, this is the block size
    size_t alignment = SkCompressedBlockSize(compressionType);
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            dataSize, alignment);
    if (!slice.fBuffer) {
        return nullptr;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();

    // copy data into the buffer, skipping any trailing bytes
    memcpy(bufferData, data, dataSize);

    SkISize levelDimensions = dimensions;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        const size_t levelRowBytes = GrCompressedRowBytes(compressionType, levelDimensions.width());
        size_t levelSize = SkCompressedDataSize(compressionType, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(), 1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() /2),
                           std::max(1, levelDimensions.height()/2)};
    }
#ifdef SK_BUILD_FOR_MAC
    // Managed buffers on macOS require notifying Metal of CPU-side writes.
    [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, dataSize)];
#endif

    // std::move converts sk_sp<GrMtlTexture> to sk_sp<GrTexture>.
    return std::move(tex);
}
716
// TODO: Extra retain/release can't be avoided here because of getMtlTextureInfo copying the
// sk_cfp. It would be useful to have a (possibly-internal-only?) API to get the raw pointer.
static id<MTLTexture> get_texture_from_backend(const GrBackendTexture& backendTex) {
    GrMtlTextureInfo info;
    if (backendTex.getMtlTextureInfo(&info)) {
        return GrGetMTLTexture(info.fTexture.get());
    }
    return nil;
}
726
// Render-target flavor of the helper above; same retain/release caveat applies.
static id<MTLTexture> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
    GrMtlTextureInfo info;
    if (backendRT.getMtlTextureInfo(&info)) {
        return GrGetMTLTexture(info.fTexture.get());
    }
    return nil;
}
734
sk_sp<GrTexture> GrMtlGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                                GrWrapOwnership,
                                                GrWrapCacheable cacheable,
                                                GrIOType ioType) {
    // Reject a missing texture or a multisampled one — we don't currently support
    // sampling from an MSAA texture in shaders. (Messaging nil yields 0, so the
    // combined check behaves identically for the nil case.)
    id<MTLTexture> texture = get_texture_from_backend(backendTex);
    if (!texture || texture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), texture, cacheable,
                                            ioType);
}
751
sk_sp<GrTexture> GrMtlGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> texture = get_texture_from_backend(backendTex);
    if (!texture) {
        return nullptr;
    }
    // Shaders can't sample from multisampled textures, so reject MSAA textures here.
    if (1 != texture.sampleCount) {
        return nullptr;
    }

    // Compressed textures are read-only from Skia's perspective.
    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), texture,
                                            cacheable, kRead_GrIOType);
}
767
sk_sp<GrTexture> GrMtlGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                          int sampleCnt,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> texture = get_texture_from_backend(backendTex);
    if (!texture) {
        return nullptr;
    }
    // Shaders can't sample from multisampled textures, so reject MSAA textures here.
    if (1 != texture.sampleCount) {
        return nullptr;
    }

    const GrMtlCaps& mtlCaps = this->mtlCaps();

    // The wrapped texture's format must support rendering at the requested sample count.
    MTLPixelFormat pixelFormat = texture.pixelFormat;
    if (!mtlCaps.isFormatRenderable(pixelFormat, sampleCnt)) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, *)) {
        SkASSERT(texture.usage & MTLTextureUsageRenderTarget);
    }

    // Snap to a sample count the device actually supports for this format.
    sampleCnt = mtlCaps.getRenderTargetSampleCount(sampleCnt, pixelFormat);
    SkASSERT(sampleCnt);

    return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, backendTex.dimensions(), sampleCnt, texture, cacheable);
}
798
sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    // Reject format/sample-count combinations the backend can't render to.
    if (!this->caps()->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    id<MTLTexture> texture = get_texture_from_backend(backendRT);
    if (!texture) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, *)) {
        SkASSERT(texture.usage & MTLTextureUsageRenderTarget);
    }

    return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendRT.dimensions(),
                                                      backendRT.sampleCnt(), texture);
}
816
bool GrMtlGpu::onRegenerateMipMapLevels(GrTexture* texture) {
    GrMtlTexture* mtlTex = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN tex = mtlTex->mtlTexture();

    // Automatic mipmap generation only works with color-renderable formats. RGBA8 gets a
    // pass because some texturable-only pixel configs use it as their internal format.
    if (MTLPixelFormatRGBA8Unorm != tex.pixelFormat &&
        !fMtlCaps->isFormatRenderable(tex.pixelFormat, 1)) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN encoder = cmdBuffer->getBlitCommandEncoder();
    [encoder generateMipmapsForTexture: tex];

    return true;
}
834
835// Used to "clear" a backend texture to a constant color by transferring.
836static GrColorType mtl_format_to_backend_tex_clear_colortype(MTLPixelFormat format) {
837    switch(format) {
838        case MTLPixelFormatA8Unorm:         return GrColorType::kAlpha_8;
839        case MTLPixelFormatR8Unorm:         return GrColorType::kR_8;
840
841#ifdef SK_BUILD_FOR_IOS
842        case MTLPixelFormatB5G6R5Unorm:     return GrColorType::kBGR_565;
843        case MTLPixelFormatABGR4Unorm:      return GrColorType::kABGR_4444;
844#endif
845        case MTLPixelFormatRGBA8Unorm:      return GrColorType::kRGBA_8888;
846        case MTLPixelFormatRGBA8Unorm_sRGB: return GrColorType::kRGBA_8888_SRGB;
847
848        case MTLPixelFormatRG8Unorm:        return GrColorType::kRG_88;
849        case MTLPixelFormatBGRA8Unorm:      return GrColorType::kBGRA_8888;
850        case MTLPixelFormatRGB10A2Unorm:    return GrColorType::kRGBA_1010102;
851#ifdef SK_BUILD_FOR_MAC
852        case MTLPixelFormatBGR10A2Unorm:    return GrColorType::kBGRA_1010102;
853#endif
854        case MTLPixelFormatR16Float:        return GrColorType::kR_F16;
855        case MTLPixelFormatRGBA16Float:     return GrColorType::kRGBA_F16;
856        case MTLPixelFormatR16Unorm:        return GrColorType::kR_16;
857        case MTLPixelFormatRG16Unorm:       return GrColorType::kRG_1616;
858        case MTLPixelFormatRGBA16Unorm:     return GrColorType::kRGBA_16161616;
859        case MTLPixelFormatRG16Float:       return GrColorType::kRG_F16;
860        default:                            return GrColorType::kUnknown;
861    }
862
863    SkUNREACHABLE;
864}
865
// Packs the mip levels in srcData tightly into dst, each level placed at its entry in
// individualMipOffsets. bufferSize is used only to assert that nothing overruns dst.
void copy_src_data(char* dst,
                   size_t bytesPerPixel,
                   const SkTArray<size_t>& individualMipOffsets,
                   const GrPixmap srcData[],
                   int numMipLevels,
                   size_t bufferSize) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(numMipLevels == individualMipOffsets.count());

    for (int i = 0; i < numMipLevels; ++i) {
        const GrPixmap& src = srcData[i];
        // Destination rows are tight (no padding between rows).
        const size_t packedRB = src.width() * bytesPerPixel;
        SkASSERT(individualMipOffsets[i] + packedRB * src.height() <= bufferSize);
        SkRectMemcpy(dst + individualMipOffsets[i], packedRB,
                     src.addr(), src.rowBytes(),
                     packedRB, src.height());
    }
}
883
// Creates the MTLTexture backing a client-visible backend texture or render target and
// stores it in *info. Returns false if the format/caps checks fail, the width exceeds the
// blit limit, or Metal refuses to create the texture.
bool GrMtlGpu::createMtlTextureForBackendSurface(MTLPixelFormat mtlFormat,
                                                 SkISize dimensions,
                                                 int sampleCnt,
                                                 GrTexturable texturable,
                                                 GrRenderable renderable,
                                                 GrMipmapped mipMapped,
                                                 GrMtlTextureInfo* info) {
    // Must be usable for at least one of sampling or rendering.
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (texturable == GrTexturable::kYes && !fMtlCaps->isFormatTexturable(mtlFormat)) {
        return false;
    }
    if (renderable == GrRenderable::kYes && !fMtlCaps->isFormatRenderable(mtlFormat, 1)) {
        return false;
    }

    if (!check_max_blit_width(dimensions.width())) {
        return false;
    }

    auto desc = [[MTLTextureDescriptor alloc] init];
    desc.pixelFormat = mtlFormat;
    desc.width = dimensions.width();
    desc.height = dimensions.height();
    if (mipMapped == GrMipmapped::kYes) {
        // Full mip chain, down to 1x1 in the larger dimension.
        desc.mipmapLevelCount = 1 + SkPrevLog2(std::max(dimensions.width(), dimensions.height()));
    }
    if (@available(macOS 10.11, iOS 9.0, *)) {
        desc.storageMode = MTLStorageModePrivate;
        MTLTextureUsage usage = texturable == GrTexturable::kYes ? MTLTextureUsageShaderRead : 0;
        usage |= renderable == GrRenderable::kYes ? MTLTextureUsageRenderTarget : 0;
        desc.usage = usage;
    }
    if (sampleCnt != 1) {
        desc.sampleCount = sampleCnt;
        desc.textureType = MTLTextureType2DMultisample;
    }
    id<MTLTexture> texture = [fDevice newTextureWithDescriptor: desc];
    if (!texture) {
        // newTextureWithDescriptor: can return nil (e.g. resource exhaustion); don't report
        // success with an empty texture.
        return false;
    }
    info->fTexture.reset(GrRetainPtrFromId(texture));
    return true;
}
925
GrBackendTexture GrMtlGpu::onCreateBackendTexture(SkISize dimensions,
                                                  const GrBackendFormat& format,
                                                  GrRenderable renderable,
                                                  GrMipmapped mipMapped,
                                                  GrProtected isProtected) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    // Single-sampled, always texturable; renderability is up to the caller.
    GrMtlTextureInfo info;
    bool created = this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1,
                                                           GrTexturable::kYes, renderable,
                                                           mipMapped, &info);
    if (!created) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info);
}
942
// Fills backendTexture with a constant color: builds one tightly-packed level of solid-color
// pixels in a CPU staging buffer, then blits that buffer into every mip level. Returns false
// if the format has no clearable color type or a staging slice can't be allocated. The
// finishedCallback, if provided, is registered via addFinishedCallback.
bool GrMtlGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                     sk_sp<GrRefCntedCallback> finishedCallback,
                                     std::array<float, 4> color) {
    GrMtlTextureInfo info;
    SkAssertResult(backendTexture.getMtlTextureInfo(&info));

    id<MTLTexture> GR_NORETAIN mtlTexture = GrGetMTLTexture(info.fTexture.get());

    const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;

    // Create a transfer buffer and fill with data.
    size_t bytesPerPixel = GrMtlFormatBytesPerBlock(mtlFormat);
    size_t combinedBufferSize;

    // Reuse the same buffer for all levels. Should be ok since we made the row bytes tight.
    combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();

    // macOS uses a larger minimum alignment — presumably for managed-buffer blits; confirm
    // against Metal's copy alignment rules if this changes.
#ifdef SK_BUILD_FOR_MAC
    static const size_t kMinAlignment = 4;
#else
    static const size_t kMinAlignment = 1;
#endif
    size_t alignment = std::max(bytesPerPixel, kMinAlignment);
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    // Map the Metal format to a color type we can express the clear color in; unsupported
    // formats can't be cleared through this path.
    auto colorType = mtl_format_to_backend_tex_clear_colortype(mtlFormat);
    if (colorType == GrColorType::kUnknown) {
        return false;
    }
    GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
    auto rb = ii.minRowBytes();
    SkASSERT(rb == bytesPerPixel*backendTexture.width());
    if (!GrClearImage(ii, buffer, rb, color)) {
        return false;
    }

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    // Every level reads from the start of the staging data; smaller levels simply read a
    // prefix of the base level's rows.
    SkISize levelDimensions(backendTexture.dimensions());
    int numMipLevels = mtlTexture.mipmapLevelCount;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes;
        size_t levelSize;

        levelRowBytes = levelDimensions.width() * bytesPerPixel;
        levelSize = levelRowBytes * levelDimensions.height();

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        // Each successive mip level halves each dimension, clamped to 1.
        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    // Flush the CPU writes to the staging buffer before the GPU reads it.
    [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}
1026
GrBackendTexture GrMtlGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
        GrProtected isProtected) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    // Compressed textures are sample-only: single-sampled, never renderable.
    GrMtlTextureInfo info;
    bool created = this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1,
                                                           GrTexturable::kYes, GrRenderable::kNo,
                                                           mipMapped, &info);
    if (!created) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info);
}
1040
// Uploads caller-supplied compressed pixel data into every mip level of backendTexture via a
// staging buffer. data/size must hold the tightly-packed mip chain for the texture's
// compression type and dimensions. Returns false if a staging slice can't be allocated. The
// finishedCallback, if provided, is registered via addFinishedCallback.
bool GrMtlGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                sk_sp<GrRefCntedCallback> finishedCallback,
                                                const void* data,
                                                size_t size) {
    GrMtlTextureInfo info;
    SkAssertResult(backendTexture.getMtlTextureInfo(&info));

    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());

    // Derive the mip count from the texture itself rather than trusting the caller.
    int numMipLevels = mtlTexture.mipmapLevelCount;
    GrMipmapped mipMapped = numMipLevels > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;

    SkImage::CompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
    SkASSERT(compression != SkImage::CompressionType::kNone);

    // Create a transfer buffer and fill with data.
    // individualMipOffsets receives the byte offset of each level within the packed chain.
    SkSTArray<16, size_t> individualMipOffsets;
    size_t combinedBufferSize;
    combinedBufferSize = SkCompressedDataSize(compression,
                                              backendTexture.dimensions(),
                                              &individualMipOffsets,
                                              mipMapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets.count() == numMipLevels);

    // macOS uses a larger minimum alignment — presumably for managed-buffer blits; confirm
    // against Metal's copy alignment rules if this changes.
#ifdef SK_BUILD_FOR_MAC
    static const size_t kMinAlignment = 4;
#else
    static const size_t kMinAlignment = 1;
#endif
    size_t alignment = std::max(SkCompressedBlockSize(compression), kMinAlignment);
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    memcpy(buffer, data, size);

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes;
        size_t levelSize;

        levelRowBytes = GrCompressedRowBytes(compression, levelDimensions.width());
        levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        // Each successive mip level halves each dimension, clamped to 1.
        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    // Flush the CPU writes to the staging buffer before the GPU reads it.
    [mtlBuffer->mtlBuffer() didModifyRange:NSMakeRange(slice.fOffset, combinedBufferSize)];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}
1122
void GrMtlGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(tex.backend() == GrBackendApi::kMetal);
    // No explicit work required: the underlying MTLTexture is released when the
    // GrBackendTexture object goes away.
}
1127
bool GrMtlGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    GrThreadSafePipelineBuilder::Stats::ProgramCacheResult cacheResult;

    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
            desc, programInfo, &cacheResult);
    if (!pipelineState) {
        return false;
    }

    // Report true only when work was actually done, i.e. this was not a cache hit.
    return cacheResult != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
}
1140
bool GrMtlGpu::precompileShader(const SkData& key, const SkData& data) {
    // Delegates straight to the resource provider's shader cache.
    auto& provider = this->resourceProvider();
    return provider.precompileShader(key, data);
}
1144
1145#if GR_TEST_UTILS
bool GrMtlGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(tex.backend() == GrBackendApi::kMetal);

    GrMtlTextureInfo info;
    if (!tex.getMtlTextureInfo(&info)) {
        return false;
    }
    id<MTLTexture> texture = GrGetMTLTexture(info.fTexture.get());
    if (!texture) {
        return false;
    }
    if (@available(macOS 10.11, iOS 9.0, *)) {
        return (texture.usage & MTLTextureUsageShaderRead) != 0;
    }
    // Usage flags unavailable on older OSes; assume readable — best we can do.
    return true;
}
1163
GrBackendRenderTarget GrMtlGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                     GrColorType ct,
                                                                     int sampleCnt,
                                                                     GrProtected isProtected) {
    const int maxRTSize = this->caps()->maxRenderTargetSize();
    if (dimensions.width() > maxRTSize || dimensions.height() > maxRTSize) {
        return {};
    }
    // Protected memory is not supported.
    if (GrProtected::kYes == isProtected) {
        return {};
    }

    MTLPixelFormat format = this->mtlCaps().getFormatFromColorType(ct);
    // Snap to a supported sample count; zero means the format isn't renderable at all.
    sampleCnt = this->mtlCaps().getRenderTargetSampleCount(sampleCnt, format);
    if (!sampleCnt) {
        return {};
    }

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(format, dimensions, sampleCnt, GrTexturable::kNo,
                                                 GrRenderable::kYes, GrMipmapped::kNo, &info)) {
        return {};
    }

    return GrBackendRenderTarget(dimensions.width(), dimensions.height(), info);
}
1190
void GrMtlGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(rt.backend() == GrBackendApi::kMetal);

    GrMtlTextureInfo info;
    if (rt.getMtlTextureInfo(&info)) {
        // Flush any outstanding work; the texture itself is released when the
        // GrBackendRenderTarget object is destroyed.
        this->submitToGpu(true);
    }
}
1201#endif // GR_TEST_UTILS
1202
void GrMtlGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src) {
    // TODO: Add support for subrectangles
    GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());

    // The destination is either a render target's color texture or a plain texture.
    id<MTLTexture> dstTexture;
    if (GrRenderTarget* dstRT = dst->asRenderTarget()) {
        dstTexture = static_cast<GrMtlRenderTarget*>(dstRT)->mtlColorTexture();
    } else {
        SkASSERT(dst->asTexture());
        dstTexture = static_cast<GrMtlTexture*>(dst->asTexture())->mtlTexture();
    }

    this->resolveTexture(dstTexture, srcRT->mtlColorTexture());
}
1218
// Copies srcRect from src into dst at dstPoint with a blit encoder. The caller must have
// verified compatibility via GrMtlCaps::canCopyAsBlit (asserted below).
void GrMtlGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                 const SkIPoint& dstPoint) {
    // SkASSERT already compiles to nothing in release builds, so the previous
    // #ifdef SK_DEBUG guard around it was redundant and has been removed.
    SkASSERT(this->mtlCaps().canCopyAsBlit(dst, src, srcRect, dstPoint));

    id<MTLTexture> GR_NORETAIN dstTex = GrGetMTLTextureFromSurface(dst);
    id<MTLTexture> GR_NORETAIN srcTex = GrGetMTLTextureFromSurface(src);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    [blitCmdEncoder copyFromTexture: srcTex
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
                         sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
                          toTexture: dstTex
                   destinationSlice: 0
                   destinationLevel: 0
                  destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
}
1239
bool GrMtlGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                             const SkIPoint& dstPoint) {
    SkASSERT(!src->isProtected() && !dst->isProtected());

    // Prefer a direct blit; fall back to an MSAA resolve when the caps allow it.
    const GrMtlCaps& mtlCaps = this->mtlCaps();
    bool copied = false;
    if (mtlCaps.canCopyAsBlit(dst, src, srcRect, dstPoint)) {
        this->copySurfaceAsBlit(dst, src, srcRect, dstPoint);
        copied = true;
    } else if (mtlCaps.canCopyAsResolve(dst, src, srcRect, dstPoint)) {
        this->copySurfaceAsResolve(dst, src);
        copied = true;
    }
    if (copied) {
        SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.x(), dstPoint.y(),
                                            srcRect.width(), srcRect.height());
        // The rect is already in device space so we pass in kTopLeft so no flip is done.
        this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
    }
    return copied;
}
1260
bool GrMtlGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                             GrColorType surfaceColorType, GrColorType srcColorType,
                             const GrMipLevel texels[], int mipLevelCount,
                             bool prepForTexSampling) {
    // TODO: In principle we should be able to support pure rendertargets as well, but
    // until we find a use case we'll only support texture rendertargets.
    auto* mtlTexture = static_cast<GrMtlTexture*>(surface->asTexture());
    if (!mtlTexture) {
        return false;
    }
    if (0 == mipLevelCount) {
        return false;
    }
#ifdef SK_DEBUG
    // Every supplied level must carry pixel data.
    for (int i = 0; i < mipLevelCount; i++) {
        SkASSERT(texels[i].fPixels);
    }
#endif
    return this->uploadToTexture(mtlTexture, left, top, width, height, srcColorType, texels,
                                 mipLevelCount);
}
1282
// Reads back a rect of `surface` into `buffer` (pitch `rowBytes`) by blitting into a
// CPU-visible transfer buffer and force-syncing the queue. No color conversion is performed.
bool GrMtlGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                            size_t rowBytes) {
    SkASSERT(surface);

    if (surfaceColorType != dstColorType) {
        return false;
    }

    int bpp = GrColorTypeBytesPerPixel(dstColorType);
    size_t transBufferRowBytes = bpp * width;
    size_t transBufferImageBytes = transBufferRowBytes * height;

    // TODO: implement some way of reusing buffers instead of making a new one every time.
    // (A previously-computed `options` bitmask of MTLResourceStorageMode flags was removed
    // here: it was never passed to createBuffer and was dead code.)
    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferImageBytes, GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern);

    if (!transferBuffer) {
        return false;
    }

    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    // Encode the GPU->buffer copy, then force a sync so the contents are CPU-visible.
    if (!this->readOrTransferPixels(surface, left, top, width, height, dstColorType,
                                    grMtlBuffer->mtlBuffer(),
                                    0, transBufferImageBytes, transBufferRowBytes)) {
        return false;
    }
    this->submitCommandBuffer(kForce_SyncQueue);

    const void* mappedMemory = grMtlBuffer->mtlBuffer().contents;

    // Repack the tight transfer-buffer rows into the caller's rowBytes pitch.
    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, transBufferRowBytes, height);

    return true;
}
1329
bool GrMtlGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                                  GrColorType textureColorType, GrColorType bufferColorType,
                                  sk_sp<GrGpuBuffer> transferBuffer, size_t offset,
                                  size_t rowBytes) {
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    // No color conversion is performed during transfers.
    if (textureColorType != bufferColorType) {
        return false;
    }

    auto* dstTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlDst = dstTexture->mtlTexture();
    SkASSERT(mtlDst);

    auto* srcBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
    id<MTLBuffer> GR_NORETAIN mtlSrc = srcBuffer->mtlBuffer();
    SkASSERT(mtlSrc);

    // Metal requires pixel-aligned source offsets, and the buffer's pixel size must match
    // the texture's.
    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (bpp != GrBackendFormatBytesPerPixel(texture->backendFormat())) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    [blitCmdEncoder copyFromBuffer: mtlSrc
                      sourceOffset: offset + srcBuffer->offset()
                 sourceBytesPerRow: rowBytes
               sourceBytesPerImage: rowBytes*height
                        sourceSize: MTLSizeMake(width, height, 1)
                         toTexture: mtlDst
                  destinationSlice: 0
                  destinationLevel: 0
                 destinationOrigin: MTLOriginMake(left, top, 0)];

    return true;
}
1372
bool GrMtlGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                    GrColorType surfaceColorType, GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer, size_t offset) {
    SkASSERT(surface);
    SkASSERT(transferBuffer);

    // No color conversion is performed during transfers.
    if (surfaceColorType != bufferColorType) {
        return false;
    }

    // Metal only supports offsets that are aligned to a pixel.
    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (offset % bpp) {
        return false;
    }
    if (bpp != GrBackendFormatBytesPerPixel(surface->backendFormat())) {
        return false;
    }

    auto* dstBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());

    // Rows in the transfer buffer are packed tight.
    const size_t tightRowBytes = bpp * width;
    const size_t imageBytes = tightRowBytes * height;

    return this->readOrTransferPixels(surface, left, top, width, height, bufferColorType,
                                      dstBuffer->mtlBuffer(), offset + dstBuffer->offset(),
                                      imageBytes, tightRowBytes);
}
1401
// Encodes a blit copying the (left, top, width, height) rect of `surface` into
// `transferBuffer` at `offset`, writing rows at a pitch of `rowBytes` (`imageBytes` total).
// For manually-resolved MSAA render targets the resolve texture is read. Returns false if
// the width exceeds the blit limit or no backing texture can be found. Note: dstColorType
// is currently unused here; the caller is expected to have sized the buffer to match it.
bool GrMtlGpu::readOrTransferPixels(GrSurface* surface, int left, int top, int width, int height,
                                    GrColorType dstColorType, id<MTLBuffer> transferBuffer,
                                    size_t offset, size_t imageBytes, size_t rowBytes) {
    if (!check_max_blit_width(width)) {
        return false;
    }

    // Pick the texture that holds the resolved pixel data.
    id<MTLTexture> mtlTexture;
    if (GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget())) {
        if (rt->numSamples() > 1) {
            SkASSERT(rt->requiresManualMSAAResolve());  // msaa-render-to-texture not yet supported.
            mtlTexture = rt->mtlResolveTexture();
        } else {
            SkASSERT(!rt->requiresManualMSAAResolve());
            mtlTexture = rt->mtlColorTexture();
        }
    } else if (GrMtlTexture* texture = static_cast<GrMtlTexture*>(surface->asTexture())) {
        mtlTexture = texture->mtlTexture();
    }
    if (!mtlTexture) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    [blitCmdEncoder copyFromTexture: mtlTexture
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(left, top, 0)
                         sourceSize: MTLSizeMake(width, height, 1)
                           toBuffer: transferBuffer
                  destinationOffset: offset
             destinationBytesPerRow: rowBytes
           destinationBytesPerImage: imageBytes];
#ifdef SK_BUILD_FOR_MAC
    // Sync GPU data back to the CPU
    [blitCmdEncoder synchronizeResource: transferBuffer];
#endif

    return true;
}
1443
GrFence SK_WARN_UNUSED_RESULT GrMtlGpu::insertFence() {
    // The fence is a dispatch semaphore that gets signaled from the current command
    // buffer's completion handler.
    dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    cmdBuffer->addCompletedHandler(^(id <MTLCommandBuffer>commandBuffer) {
        dispatch_semaphore_signal(semaphore);
    });

    // Transfer ownership out of ARC; deleteFence() balances this with CFRelease.
    return (GrFence) (__bridge_retained const void*) semaphore;
}
1456
bool GrMtlGpu::waitFence(GrFence fence) {
    dispatch_semaphore_t semaphore = (__bridge dispatch_semaphore_t)(const void*) fence;
    // A zero timeout makes this a non-blocking poll; a nonzero result means not yet signaled.
    return 0 == dispatch_semaphore_wait(semaphore, 0);
}
1465
void GrMtlGpu::deleteFence(GrFence fence) const {
    // Balances the __bridge_retained in insertFence(); releasing via CoreFoundation is
    // simpler than routing ownership back through ARC.
    CFRelease((const void*) fence);
}
1471
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrMtlGpu::makeSemaphore(bool /*isOwned*/) {
    // Semaphores should only be requested when the caps advertise support
    // (requires MTLEvent, macOS 10.14 / iOS 12).
    SkASSERT(this->caps()->semaphoreSupport());
    return GrMtlSemaphore::Make(this);
}
1476
std::unique_ptr<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(
        const GrBackendSemaphore& semaphore,
        GrResourceProvider::SemaphoreWrapType wrapType,
        GrWrapOwnership /*ownership*/) {
    // Wrap the client-supplied Metal shared event + value pair. Neither the
    // wrap type nor the ownership affects how a Metal semaphore is wrapped,
    // so both are ignored here.
    SkASSERT(this->caps()->semaphoreSupport());
    return GrMtlSemaphore::MakeWrapped(semaphore.mtlSemaphore(), semaphore.mtlValue());
}
1484
void GrMtlGpu::insertSemaphore(GrSemaphore* semaphore) {
    // MTLEvent-backed semaphores require macOS 10.14 / iOS 12; on older OS
    // versions this is a no-op (caps should prevent us getting here anyway).
    if (@available(macOS 10.14, iOS 12.0, *)) {
        SkASSERT(semaphore);
        auto* grMtlSem = static_cast<GrMtlSemaphore*>(semaphore);
        this->commandBuffer()->encodeSignalEvent(grMtlSem->event(), grMtlSem->value());
    }
}
1493
void GrMtlGpu::waitSemaphore(GrSemaphore* semaphore) {
    // Counterpart to insertSemaphore(): stalls subsequent GPU work until the
    // underlying MTLEvent reaches the semaphore's value. No-op pre-10.14/12.0.
    if (@available(macOS 10.14, iOS 12.0, *)) {
        SkASSERT(semaphore);
        auto* grMtlSem = static_cast<GrMtlSemaphore*>(semaphore);
        this->commandBuffer()->encodeWaitForEvent(grMtlSem->event(), grMtlSem->value());
    }
}
1502
void GrMtlGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect&) {
    // Resolve the whole target's MSAA color texture into its resolve texture;
    // the rect parameter is unused because Metal resolves full attachments.
    auto* mtlRT = static_cast<GrMtlRenderTarget*>(target);
    this->resolveTexture(mtlRT->mtlResolveTexture(), mtlRT->mtlColorTexture());
}
1507
void GrMtlGpu::resolveTexture(id<MTLTexture> resolveTexture, id<MTLTexture> colorTexture) {
    // Encode an empty render pass whose only purpose is the MSAA resolve:
    // loading the existing MSAA contents and storing with a multisample-
    // resolve action copies them into the resolve texture without any draws.
    MTLRenderPassDescriptor* renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
    MTLRenderPassColorAttachmentDescriptor* colorAttachment = renderPassDesc.colorAttachments[0];
    colorAttachment.texture = colorTexture;
    colorAttachment.resolveTexture = resolveTexture;
    colorAttachment.loadAction = MTLLoadActionLoad;
    colorAttachment.storeAction = MTLStoreActionMultisampleResolve;

    id<MTLRenderCommandEncoder> GR_NORETAIN cmdEncoder =
            this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr, nullptr);
    SkASSERT(nil != cmdEncoder);
    cmdEncoder.label = @"resolveTexture";
}
1521
1522#if GR_TEST_UTILS
void GrMtlGpu::testingOnly_startCapture() {
    // Begin a Metal frame capture scoped to our device (test-only hook).
    if (@available(macOS 10.13, iOS 11.0, *)) {
        // TODO: add Metal 3 interface as well
        // NOTE(review): startCaptureWithDevice: is deprecated as of
        // macOS 10.15 / iOS 13 in favor of startCaptureWithDescriptor:error:
        // -- worth migrating when the deployment target allows.
        [[MTLCaptureManager sharedCaptureManager] startCaptureWithDevice: fDevice];
    }
}
1530
void GrMtlGpu::testingOnly_endCapture() {
    // End the capture begun by testingOnly_startCapture() (test-only hook).
    if (@available(macOS 10.13, iOS 11.0, *)) {
        [[MTLCaptureManager sharedCaptureManager] stopCapture];
    }
}
1537#endif
1538
1539#ifdef SK_ENABLE_DUMP_GPU
1540#include "src/utils/SkJSONWriter.h"
// Dumps Metal device/queue capabilities as JSON for debugging. Every property
// access is gated on the OS version (@available) and, where the symbol may be
// absent from older SDKs, on *_VERSION_MAX_ALLOWED preprocessor checks -- both
// guards are needed: the preprocessor check for compile time, @available for
// run time.
void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
    // We are called by the base class, which has already called beginObject(). We choose to nest
    // all of our caps information in a named sub-object.
    writer->beginObject("Metal GPU");

    writer->beginObject("Device");
    writer->appendString("name", fDevice.name.UTF8String);
    // Mac-only form-factor / power attributes.
#ifdef SK_BUILD_FOR_MAC
    if (@available(macOS 10.11, *)) {
        writer->appendBool("isHeadless", fDevice.isHeadless);
        writer->appendBool("isLowPower", fDevice.isLowPower);
    }
    if (@available(macOS 10.13, *)) {
        writer->appendBool("isRemovable", fDevice.isRemovable);
    }
#endif
    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendU64("registryID", fDevice.registryID);
    }
    // Device-location info needs both a Mac build and a 10.15+ SDK (the
    // MTLDeviceLocation* symbols don't exist in earlier SDKs).
#if defined(SK_BUILD_FOR_MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    if (@available(macOS 10.15, *)) {
        switch (fDevice.location) {
            case MTLDeviceLocationBuiltIn:
                writer->appendString("location", "builtIn");
                break;
            case MTLDeviceLocationSlot:
                writer->appendString("location", "slot");
                break;
            case MTLDeviceLocationExternal:
                writer->appendString("location", "external");
                break;
            case MTLDeviceLocationUnspecified:
                writer->appendString("location", "unspecified");
                break;
            default:
                // Future-proofing: new enum values map to "unknown".
                writer->appendString("location", "unknown");
                break;
        }
        writer->appendU64("locationNumber", fDevice.locationNumber);
        writer->appendU64("maxTransferRate", fDevice.maxTransferRate);
    }
#endif  // SK_BUILD_FOR_MAC && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500 || __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000
    if (@available(macOS 10.15, iOS 13.0, *)) {
        writer->appendBool("hasUnifiedMemory", fDevice.hasUnifiedMemory);
    }
#endif
    // Mac-only memory/peer-group attributes.
#ifdef SK_BUILD_FOR_MAC
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    if (@available(macOS 10.15, *)) {
        writer->appendU64("peerGroupID", fDevice.peerGroupID);
        writer->appendU32("peerCount", fDevice.peerCount);
        writer->appendU32("peerIndex", fDevice.peerIndex);
    }
#endif
    if (@available(macOS 10.12, *)) {
        writer->appendU64("recommendedMaxWorkingSetSize", fDevice.recommendedMaxWorkingSetSize);
    }
#endif  // SK_BUILD_FOR_MAC
    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendU64("currentAllocatedSize", fDevice.currentAllocatedSize);
        writer->appendU64("maxThreadgroupMemoryLength", fDevice.maxThreadgroupMemoryLength);
    }

    // Compute dispatch limits (MTLSize struct -> nested JSON object).
    if (@available(macOS 10.11, iOS 9.0, *)) {
        writer->beginObject("maxThreadsPerThreadgroup");
        writer->appendU64("width", fDevice.maxThreadsPerThreadgroup.width);
        writer->appendU64("height", fDevice.maxThreadsPerThreadgroup.height);
        writer->appendU64("depth", fDevice.maxThreadsPerThreadgroup.depth);
        writer->endObject();
    }

    if (@available(macOS 10.13, iOS 11.0, *)) {
        writer->appendBool("areProgrammableSamplePositionsSupported",
                           fDevice.areProgrammableSamplePositionsSupported);
        writer->appendBool("areRasterOrderGroupsSupported",
                           fDevice.areRasterOrderGroupsSupported);
    }
#ifdef SK_BUILD_FOR_MAC
    if (@available(macOS 10.11, *)) {
        writer->appendBool("isDepth24Stencil8PixelFormatSupported",
                           fDevice.isDepth24Stencil8PixelFormatSupported);

    }
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
    if (@available(macOS 10.15, *)) {
        writer->appendBool("areBarycentricCoordsSupported",
                           fDevice.areBarycentricCoordsSupported);
        writer->appendBool("supportsShaderBarycentricCoordinates",
                           fDevice.supportsShaderBarycentricCoordinates);
    }
#endif
#endif  // SK_BUILD_FOR_MAC
    if (@available(macOS 10.14, iOS 12.0, *)) {
        writer->appendU64("maxBufferLength", fDevice.maxBufferLength);
    }
    // Feature tiers, rendered as strings for readability.
    if (@available(macOS 10.13, iOS 11.0, *)) {
        switch (fDevice.readWriteTextureSupport) {
            case MTLReadWriteTextureTier1:
                writer->appendString("readWriteTextureSupport", "tier1");
                break;
            case MTLReadWriteTextureTier2:
                writer->appendString("readWriteTextureSupport", "tier2");
                break;
            case MTLReadWriteTextureTierNone:
                writer->appendString("readWriteTextureSupport", "tierNone");
                break;
            default:
                writer->appendString("readWriteTextureSupport", "unknown");
                break;
        }
        switch (fDevice.argumentBuffersSupport) {
            case MTLArgumentBuffersTier1:
                writer->appendString("argumentBuffersSupport", "tier1");
                break;
            case MTLArgumentBuffersTier2:
                writer->appendString("argumentBuffersSupport", "tier2");
                break;
            default:
                writer->appendString("argumentBuffersSupport", "unknown");
                break;
        }
    }
    if (@available(macOS 10.14, iOS 12.0, *)) {
        writer->appendU64("maxArgumentBufferSamplerCount", fDevice.maxArgumentBufferSamplerCount);
    }
#ifdef SK_BUILD_FOR_IOS
    if (@available(iOS 13.0, *)) {
        writer->appendU64("sparseTileSizeInBytes", fDevice.sparseTileSizeInBytes);
    }
#endif
    writer->endObject();  // "Device"

    writer->appendString("queue", fQueue.label.UTF8String);
    writer->appendBool("disconnected", fDisconnected);

    writer->endObject();  // "Metal GPU"
}
1679#endif
1680
1681GR_NORETAIN_END
1682