1 /*
2 * Copyright 2020 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/d3d/GrD3DGpu.h"
9
10 #include "include/gpu/GrBackendSurface.h"
11 #include "include/gpu/d3d/GrD3DBackendContext.h"
12 #include "src/core/SkConvertPixels.h"
13 #include "src/core/SkMipmap.h"
14 #include "src/gpu/GrBackendUtils.h"
15 #include "src/gpu/GrDataUtils.h"
16 #include "src/gpu/GrTexture.h"
17 #include "src/gpu/GrThreadSafePipelineBuilder.h"
18 #include "src/gpu/d3d/GrD3DAMDMemoryAllocator.h"
19 #include "src/gpu/d3d/GrD3DAttachment.h"
20 #include "src/gpu/d3d/GrD3DBuffer.h"
21 #include "src/gpu/d3d/GrD3DCaps.h"
22 #include "src/gpu/d3d/GrD3DOpsRenderPass.h"
23 #include "src/gpu/d3d/GrD3DSemaphore.h"
24 #include "src/gpu/d3d/GrD3DTexture.h"
25 #include "src/gpu/d3d/GrD3DTextureRenderTarget.h"
26 #include "src/gpu/d3d/GrD3DUtil.h"
27 #include "src/sksl/SkSLCompiler.h"
28
29 #if GR_TEST_UTILS
30 #include <DXProgrammableCapture.h>
31 #endif
32
33 GrThreadSafePipelineBuilder* GrD3DGpu::pipelineBuilder() {
34 return nullptr;
35 }
36
37 sk_sp<GrThreadSafePipelineBuilder> GrD3DGpu::refPipelineBuilder() {
38 return nullptr;
39 }
40
41
42 sk_sp<GrGpu> GrD3DGpu::Make(const GrD3DBackendContext& backendContext,
43 const GrContextOptions& contextOptions, GrDirectContext* direct) {
44 sk_sp<GrD3DMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
45 if (!memoryAllocator) {
46 // We were not given a memory allocator at creation
47 memoryAllocator = GrD3DAMDMemoryAllocator::Make(
48 backendContext.fAdapter.get(), backendContext.fDevice.get());
49 }
50 if (!memoryAllocator) {
51 SkDEBUGFAIL("No supplied Direct3D memory allocator and unable to create one internally.");
52 return nullptr;
53 }
54
55 return sk_sp<GrGpu>(new GrD3DGpu(direct, contextOptions, backendContext, memoryAllocator));
56 }
57
58 // This constant determines how many OutstandingCommandLists are allocated together as a block in
59 // the deque. As such it needs to balance allocating too much memory vs. incurring
60 // allocation/deallocation thrashing. It should roughly correspond to the max number of outstanding
61 // command lists we expect to see.
62 static const int kDefaultOutstandingAllocCnt = 8;
63
64 // Constant buffer data must be aligned to 256 bytes.
65 constexpr int kConstantAlignment = 256;
66
67 GrD3DGpu::GrD3DGpu(GrDirectContext* direct, const GrContextOptions& contextOptions,
68 const GrD3DBackendContext& backendContext,
69 sk_sp<GrD3DMemoryAllocator> allocator)
70 : INHERITED(direct)
71 , fDevice(backendContext.fDevice)
72 , fQueue(backendContext.fQueue)
73 , fMemoryAllocator(std::move(allocator))
74 , fResourceProvider(this)
75 , fStagingBufferManager(this)
76 , fConstantsRingBuffer(this, 128 * 1024, kConstantAlignment, GrGpuBufferType::kVertex)
77 , fOutstandingCommandLists(sizeof(OutstandingCommandList), kDefaultOutstandingAllocCnt) {
78 this->initCapsAndCompiler(sk_make_sp<GrD3DCaps>(contextOptions,
79 backendContext.fAdapter.get(),
80 backendContext.fDevice.get()));
81
82 fCurrentDirectCommandList = fResourceProvider.findOrCreateDirectCommandList();
83 SkASSERT(fCurrentDirectCommandList);
84
85 SkASSERT(fCurrentFenceValue == 0);
86 GR_D3D_CALL_ERRCHECK(fDevice->CreateFence(fCurrentFenceValue, D3D12_FENCE_FLAG_NONE,
87 IID_PPV_ARGS(&fFence)));
88
89 #if GR_TEST_UTILS
90 HRESULT getAnalysis = DXGIGetDebugInterface1(0, IID_PPV_ARGS(&fGraphicsAnalysis));
91 if (FAILED(getAnalysis)) {
92 fGraphicsAnalysis = nullptr;
93 }
94 #endif
95 }
96
97 GrD3DGpu::~GrD3DGpu() {
98 this->destroyResources();
99 }
100
101 void GrD3DGpu::destroyResources() {
102 if (fCurrentDirectCommandList) {
103 fCurrentDirectCommandList->close();
104 fCurrentDirectCommandList->reset();
105 }
106
107 // We need to make sure everything has finished on the queue.
108 this->waitForQueueCompletion();
109
110 SkDEBUGCODE(uint64_t fenceValue = fFence->GetCompletedValue();)
111
112 // We used a placement new for each object in fOutstandingCommandLists, so we're responsible
113 // for calling the destructor on each of them as well.
114 while (!fOutstandingCommandLists.empty()) {
115 OutstandingCommandList* list = (OutstandingCommandList*)fOutstandingCommandLists.front();
116 SkASSERT(list->fFenceValue <= fenceValue);
117         // No reason to recycle the command lists since we are destroying all resources anyway.
118 list->~OutstandingCommandList();
119 fOutstandingCommandLists.pop_front();
120 }
121
122 fStagingBufferManager.reset();
123
124 fResourceProvider.destroyResources();
125 }
126
127 GrOpsRenderPass* GrD3DGpu::onGetOpsRenderPass(
128 GrRenderTarget* rt,
129 bool /*useMSAASurface*/,
130 GrAttachment*,
131 GrSurfaceOrigin origin,
132 const SkIRect& bounds,
133 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
134 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
135 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
136 GrXferBarrierFlags renderPassXferBarriers) {
137 if (!fCachedOpsRenderPass) {
138 fCachedOpsRenderPass.reset(new GrD3DOpsRenderPass(this));
139 }
140
141 if (!fCachedOpsRenderPass->set(rt, origin, bounds, colorInfo, stencilInfo, sampledProxies)) {
142 return nullptr;
143 }
144 return fCachedOpsRenderPass.get();
145 }
146
147 bool GrD3DGpu::submitDirectCommandList(SyncQueue sync) {
148 SkASSERT(fCurrentDirectCommandList);
149
150 fResourceProvider.prepForSubmit();
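    // The CPU descriptors created during mipmap generation are only needed while recording, so
    // recycle them before this command list is handed off.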
151 for (int i = 0; i < fMipmapCPUDescriptors.count(); ++i) {
152 fResourceProvider.recycleShaderView(fMipmapCPUDescriptors[i]);
153 }
154 fMipmapCPUDescriptors.reset();
155
156 GrD3DDirectCommandList::SubmitResult result = fCurrentDirectCommandList->submit(fQueue.get());
157 if (result == GrD3DDirectCommandList::SubmitResult::kFailure) {
158 return false;
159 } else if (result == GrD3DDirectCommandList::SubmitResult::kNoWork) {
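        // Nothing was recorded, so there is no list to hand off; keep the current command list and
        // only honor a forced sync before returning.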
160 if (sync == SyncQueue::kForce) {
161 this->waitForQueueCompletion();
162 this->checkForFinishedCommandLists();
163 }
164 return true;
165 }
166
167     // We just submitted the command list, so make sure all GrD3DPipelineStates mark their cached
168 // uniform data as dirty.
169 fResourceProvider.markPipelineStateUniformsDirty();
170
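    // Hand the submitted command list off to the tracking deque (constructed in place with
    // placement new) along with the fence value that will signal when the GPU is done with it.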
171 GrFence fence = this->insertFence();
172 new (fOutstandingCommandLists.push_back()) OutstandingCommandList(
173 std::move(fCurrentDirectCommandList), fence);
174
175 if (sync == SyncQueue::kForce) {
176 this->waitForQueueCompletion();
177 }
178
179 fCurrentDirectCommandList = fResourceProvider.findOrCreateDirectCommandList();
180
181     // This should be done after we have a new command list in case the freeing of any resources
182     // held by a finished command list causes us to send a new command to the gpu (like changing
183     // the resource state).
184 this->checkForFinishedCommandLists();
185
186 SkASSERT(fCurrentDirectCommandList);
187 return true;
188 }
189
190 void GrD3DGpu::checkForFinishedCommandLists() {
191 uint64_t currentFenceValue = fFence->GetCompletedValue();
192
193     // Iterate over all the outstanding command lists to see if any have finished. The command
194     // lists are in order from oldest to newest, so we start at the front to check if their fence
195     // value is less than the last signaled value. If so, we pop it off and move on to the next.
196     // Repeat until we find a command list that has not finished yet (and all others after it are
197     // also guaranteed to not have finished).
198 OutstandingCommandList* front = (OutstandingCommandList*)fOutstandingCommandLists.front();
199 while (front && front->fFenceValue <= currentFenceValue) {
200 std::unique_ptr<GrD3DDirectCommandList> currList(std::move(front->fCommandList));
201 // Since we used placement new we are responsible for calling the destructor manually.
202 front->~OutstandingCommandList();
203 fOutstandingCommandLists.pop_front();
204 fResourceProvider.recycleDirectCommandList(std::move(currList));
205 front = (OutstandingCommandList*)fOutstandingCommandLists.front();
206 }
207 }
208
209 void GrD3DGpu::waitForQueueCompletion() {
210 if (fFence->GetCompletedValue() < fCurrentFenceValue) {
211 HANDLE fenceEvent;
212 fenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
213 SkASSERT(fenceEvent);
214 GR_D3D_CALL_ERRCHECK(fFence->SetEventOnCompletion(fCurrentFenceValue, fenceEvent));
215 WaitForSingleObject(fenceEvent, INFINITE);
216 CloseHandle(fenceEvent);
217 }
218 }
219
220 void GrD3DGpu::submit(GrOpsRenderPass* renderPass) {
221 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
222
223 fCachedOpsRenderPass->submit();
224 fCachedOpsRenderPass.reset();
225 }
226
227 void GrD3DGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
228 const SkIRect& bounds) {
229 this->didWriteToSurface(target, origin, &bounds);
230 }
231
232 void GrD3DGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
233 GrGpuFinishedContext finishedContext) {
234 SkASSERT(finishedProc);
235 this->addFinishedCallback(GrRefCntedCallback::Make(finishedProc, finishedContext));
236 }
237
238 void GrD3DGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
239 SkASSERT(finishedCallback);
240     // Besides the current command list, we also add the finishedCallback to the newest outstanding
241     // command list. Our contract for calling the proc is that all previously submitted command
242     // lists have finished when we call it. However, if our current command list has no work when
243     // it is flushed, it will drop its ref to the callback immediately. But the previous work may
244     // not have finished. It is safe to only add the proc to the newest outstanding command list
245     // because that one must finish after all previously submitted command lists.
246 OutstandingCommandList* back = (OutstandingCommandList*)fOutstandingCommandLists.back();
247 if (back) {
248 back->fCommandList->addFinishedCallback(finishedCallback);
249 }
250 fCurrentDirectCommandList->addFinishedCallback(std::move(finishedCallback));
251 }
252
253 sk_sp<GrD3DTexture> GrD3DGpu::createD3DTexture(SkISize dimensions,
254 DXGI_FORMAT dxgiFormat,
255 GrRenderable renderable,
256 int renderTargetSampleCnt,
257 SkBudgeted budgeted,
258 GrProtected isProtected,
259 int mipLevelCount,
260 GrMipmapStatus mipmapStatus) {
261 D3D12_RESOURCE_FLAGS usageFlags = D3D12_RESOURCE_FLAG_NONE;
262 if (renderable == GrRenderable::kYes) {
263 usageFlags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
264 }
265
266 // This desc refers to a texture that will be read by the client. Thus even if msaa is
267 // requested, this describes the resolved texture. Therefore we always have samples set
268 // to 1.
269 SkASSERT(mipLevelCount > 0);
270 D3D12_RESOURCE_DESC resourceDesc = {};
271 resourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
272 // TODO: will use 4MB alignment for MSAA textures and 64KB for everything else
273 // might want to manually set alignment to 4KB for smaller textures
274 resourceDesc.Alignment = 0;
275 resourceDesc.Width = dimensions.fWidth;
276 resourceDesc.Height = dimensions.fHeight;
277 resourceDesc.DepthOrArraySize = 1;
278 resourceDesc.MipLevels = mipLevelCount;
279 resourceDesc.Format = dxgiFormat;
280 resourceDesc.SampleDesc.Count = 1;
281 resourceDesc.SampleDesc.Quality = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
282 resourceDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN; // use driver-selected swizzle
283 resourceDesc.Flags = usageFlags;
284
285 if (renderable == GrRenderable::kYes) {
286 return GrD3DTextureRenderTarget::MakeNewTextureRenderTarget(
287 this, budgeted, dimensions, renderTargetSampleCnt, resourceDesc, isProtected,
288 mipmapStatus);
289 } else {
290 return GrD3DTexture::MakeNewTexture(this, budgeted, dimensions, resourceDesc, isProtected,
291 mipmapStatus);
292 }
293 }
294
295 sk_sp<GrTexture> GrD3DGpu::onCreateTexture(SkISize dimensions,
296 const GrBackendFormat& format,
297 GrRenderable renderable,
298 int renderTargetSampleCnt,
299 SkBudgeted budgeted,
300 GrProtected isProtected,
301 int mipLevelCount,
302 uint32_t levelClearMask) {
303 DXGI_FORMAT dxgiFormat;
304 SkAssertResult(format.asDxgiFormat(&dxgiFormat));
305 SkASSERT(!GrDxgiFormatIsCompressed(dxgiFormat));
306
307 GrMipmapStatus mipmapStatus = mipLevelCount > 1 ? GrMipmapStatus::kDirty
308 : GrMipmapStatus::kNotAllocated;
309
310 sk_sp<GrD3DTexture> tex = this->createD3DTexture(dimensions, dxgiFormat, renderable,
311 renderTargetSampleCnt, budgeted, isProtected,
312 mipLevelCount, mipmapStatus);
313 if (!tex) {
314 return nullptr;
315 }
316
317 if (levelClearMask) {
318 // TODO
319 }
320
321 return std::move(tex);
322 }
323
324 static void copy_compressed_data(char* mapPtr, DXGI_FORMAT dxgiFormat,
325 D3D12_PLACED_SUBRESOURCE_FOOTPRINT* placedFootprints,
326 UINT* numRows, UINT64* rowSizeInBytes,
327 const void* compressedData, int numMipLevels) {
328 SkASSERT(compressedData && numMipLevels);
329 SkASSERT(GrDxgiFormatIsCompressed(dxgiFormat));
330 SkASSERT(mapPtr);
331
332 const char* src = static_cast<const char*>(compressedData);
333 for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
334 // copy data into the buffer, skipping any trailing bytes
335 char* dst = mapPtr + placedFootprints[currentMipLevel].Offset;
336 SkRectMemcpy(dst, placedFootprints[currentMipLevel].Footprint.RowPitch,
337 src, rowSizeInBytes[currentMipLevel], rowSizeInBytes[currentMipLevel],
338 numRows[currentMipLevel]);
339 src += numRows[currentMipLevel] * rowSizeInBytes[currentMipLevel];
340 }
341 }
342
343 sk_sp<GrTexture> GrD3DGpu::onCreateCompressedTexture(SkISize dimensions,
344 const GrBackendFormat& format,
345 SkBudgeted budgeted,
346 GrMipmapped mipMapped,
347 GrProtected isProtected,
348 const void* data, size_t dataSize) {
349 DXGI_FORMAT dxgiFormat;
350 SkAssertResult(format.asDxgiFormat(&dxgiFormat));
351 SkASSERT(GrDxgiFormatIsCompressed(dxgiFormat));
352
353 SkDEBUGCODE(SkImage::CompressionType compression = GrBackendFormatToCompressionType(format));
354 SkASSERT(dataSize == SkCompressedFormatDataSize(compression, dimensions,
355 mipMapped == GrMipmapped::kYes));
356
357 int mipLevelCount = 1;
358 if (mipMapped == GrMipmapped::kYes) {
359 mipLevelCount = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
360 }
361 GrMipmapStatus mipmapStatus = mipLevelCount > 1 ? GrMipmapStatus::kValid
362 : GrMipmapStatus::kNotAllocated;
363
364 sk_sp<GrD3DTexture> d3dTex = this->createD3DTexture(dimensions, dxgiFormat, GrRenderable::kNo,
365 1, budgeted, isProtected,
366 mipLevelCount, mipmapStatus);
367 if (!d3dTex) {
368 return nullptr;
369 }
370
371 ID3D12Resource* d3dResource = d3dTex->d3dResource();
372 SkASSERT(d3dResource);
373 D3D12_RESOURCE_DESC desc = d3dResource->GetDesc();
374 // Either upload only the first miplevel or all miplevels
375 SkASSERT(1 == mipLevelCount || mipLevelCount == (int)desc.MipLevels);
376
377 SkAutoTMalloc<D3D12_PLACED_SUBRESOURCE_FOOTPRINT> placedFootprints(mipLevelCount);
378 SkAutoTMalloc<UINT> numRows(mipLevelCount);
379 SkAutoTMalloc<UINT64> rowSizeInBytes(mipLevelCount);
380 UINT64 combinedBufferSize;
381 // We reset the width and height in the description to match our subrectangle size
382 // so we don't end up allocating more space than we need.
383 desc.Width = dimensions.width();
384 desc.Height = dimensions.height();
385 fDevice->GetCopyableFootprints(&desc, 0, mipLevelCount, 0, placedFootprints.get(),
386 numRows.get(), rowSizeInBytes.get(), &combinedBufferSize);
387 SkASSERT(combinedBufferSize);
388
389 GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
390 combinedBufferSize, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
391 if (!slice.fBuffer) {
392         return nullptr;
393 }
394
395 char* bufferData = (char*)slice.fOffsetMapPtr;
396
397 copy_compressed_data(bufferData, desc.Format, placedFootprints.get(), numRows.get(),
398 rowSizeInBytes.get(), data, mipLevelCount);
399
400 // Update the offsets in the footprints to be relative to the slice's offset
401 for (int i = 0; i < mipLevelCount; ++i) {
402 placedFootprints[i].Offset += slice.fOffset;
403 }
404
405 ID3D12Resource* d3dBuffer = static_cast<GrD3DBuffer*>(slice.fBuffer)->d3dResource();
406 fCurrentDirectCommandList->copyBufferToTexture(d3dBuffer, d3dTex.get(), mipLevelCount,
407 placedFootprints.get(), 0, 0);
408
409 return std::move(d3dTex);
410 }
411
412 static int get_surface_sample_cnt(GrSurface* surf) {
413 if (const GrRenderTarget* rt = surf->asRenderTarget()) {
414 return rt->numSamples();
415 }
416 return 0;
417 }
418
419 bool GrD3DGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
420 const SkIPoint& dstPoint) {
421
422 if (src->isProtected() && !dst->isProtected()) {
423 SkDebugf("Can't copy from protected memory to non-protected");
424 return false;
425 }
426
427 int dstSampleCnt = get_surface_sample_cnt(dst);
428 int srcSampleCnt = get_surface_sample_cnt(src);
429
430 GrD3DTextureResource* dstTexResource;
431 GrD3DTextureResource* srcTexResource;
432 GrRenderTarget* dstRT = dst->asRenderTarget();
433 if (dstRT) {
434 GrD3DRenderTarget* d3dRT = static_cast<GrD3DRenderTarget*>(dstRT);
435 dstTexResource = d3dRT->numSamples() > 1 ? d3dRT->msaaTextureResource() : d3dRT;
436 } else {
437 SkASSERT(dst->asTexture());
438 dstTexResource = static_cast<GrD3DTexture*>(dst->asTexture());
439 }
440 GrRenderTarget* srcRT = src->asRenderTarget();
441 if (srcRT) {
442 GrD3DRenderTarget* d3dRT = static_cast<GrD3DRenderTarget*>(srcRT);
443 srcTexResource = d3dRT->numSamples() > 1 ? d3dRT->msaaTextureResource() : d3dRT;
444 } else {
445 SkASSERT(src->asTexture());
446 srcTexResource = static_cast<GrD3DTexture*>(src->asTexture());
447 }
448
449 DXGI_FORMAT dstFormat = dstTexResource->dxgiFormat();
450 DXGI_FORMAT srcFormat = srcTexResource->dxgiFormat();
451
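    // Prefer a resolve when the formats and sample counts allow it; otherwise fall back to a
    // plain texture-to-texture copy.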
452 if (this->d3dCaps().canCopyAsResolve(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt)) {
453 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
454 return true;
455 }
456
457 if (this->d3dCaps().canCopyTexture(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt)) {
458 this->copySurfaceAsCopyTexture(dst, src, dstTexResource, srcTexResource, srcRect, dstPoint);
459 return true;
460 }
461
462 return false;
463 }
464
465 void GrD3DGpu::copySurfaceAsCopyTexture(GrSurface* dst, GrSurface* src,
466 GrD3DTextureResource* dstResource,
467 GrD3DTextureResource* srcResource,
468 const SkIRect& srcRect, const SkIPoint& dstPoint) {
469 #ifdef SK_DEBUG
470 int dstSampleCnt = get_surface_sample_cnt(dst);
471 int srcSampleCnt = get_surface_sample_cnt(src);
472 DXGI_FORMAT dstFormat = dstResource->dxgiFormat();
473 DXGI_FORMAT srcFormat;
474     SkAssertResult(src->backendFormat().asDxgiFormat(&srcFormat));
475 SkASSERT(this->d3dCaps().canCopyTexture(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt));
476 #endif
477 if (src->isProtected() && !dst->isProtected()) {
478 SkDebugf("Can't copy from protected memory to non-protected");
479 return;
480 }
481
482 dstResource->setResourceState(this, D3D12_RESOURCE_STATE_COPY_DEST);
483 srcResource->setResourceState(this, D3D12_RESOURCE_STATE_COPY_SOURCE);
484
485 D3D12_TEXTURE_COPY_LOCATION dstLocation = {};
486 dstLocation.pResource = dstResource->d3dResource();
487 dstLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
488 dstLocation.SubresourceIndex = 0;
489
490 D3D12_TEXTURE_COPY_LOCATION srcLocation = {};
491 srcLocation.pResource = srcResource->d3dResource();
492 srcLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
493 srcLocation.SubresourceIndex = 0;
494
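    // Note that D3D12_BOX coordinates are exclusive on the right/bottom/back edges; front/back of
    // 0/1 selects the single depth slice of a 2D texture.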
495 D3D12_BOX srcBox = {};
496 srcBox.left = srcRect.fLeft;
497 srcBox.top = srcRect.fTop;
498 srcBox.right = srcRect.fRight;
499 srcBox.bottom = srcRect.fBottom;
500 srcBox.front = 0;
501 srcBox.back = 1;
502 // TODO: use copyResource if copying full resource and sizes match
503 fCurrentDirectCommandList->copyTextureRegionToTexture(dstResource->resource(),
504 &dstLocation,
505 dstPoint.fX, dstPoint.fY,
506 srcResource->resource(),
507 &srcLocation,
508 &srcBox);
509
510 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
511 srcRect.width(), srcRect.height());
512 // The rect is already in device space so we pass in kTopLeft so no flip is done.
513 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
514 }
515
516 void GrD3DGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
517 const SkIPoint& dstPoint) {
518 GrD3DRenderTarget* srcRT = static_cast<GrD3DRenderTarget*>(src->asRenderTarget());
519 SkASSERT(srcRT);
520
521 this->resolveTexture(dst, dstPoint.fX, dstPoint.fY, srcRT, srcRect);
522 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
523 srcRect.width(), srcRect.height());
524 // The rect is already in device space so we pass in kTopLeft so no flip is done.
525 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
526 }
527
528 void GrD3DGpu::resolveTexture(GrSurface* dst, int32_t dstX, int32_t dstY,
529 GrD3DRenderTarget* src, const SkIRect& srcIRect) {
530 SkASSERT(dst);
531 SkASSERT(src && src->numSamples() > 1 && src->msaaTextureResource());
532
533 D3D12_RECT srcRect = { srcIRect.fLeft, srcIRect.fTop, srcIRect.fRight, srcIRect.fBottom };
534
535 GrD3DTextureResource* dstTextureResource;
536 GrRenderTarget* dstRT = dst->asRenderTarget();
537 if (dstRT) {
538 dstTextureResource = static_cast<GrD3DRenderTarget*>(dstRT);
539 } else {
540 SkASSERT(dst->asTexture());
541 dstTextureResource = static_cast<GrD3DTexture*>(dst->asTexture());
542 }
543
544 dstTextureResource->setResourceState(this, D3D12_RESOURCE_STATE_RESOLVE_DEST);
545 src->msaaTextureResource()->setResourceState(this, D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
546
547 fCurrentDirectCommandList->resolveSubresourceRegion(dstTextureResource, dstX, dstY,
548 src->msaaTextureResource(), &srcRect);
549 }
550
551 void GrD3DGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
552 SkASSERT(target->numSamples() > 1);
553 GrD3DRenderTarget* rt = static_cast<GrD3DRenderTarget*>(target);
554 SkASSERT(rt->msaaTextureResource() && rt != rt->msaaTextureResource());
555
556 this->resolveTexture(target, resolveRect.fLeft, resolveRect.fTop, rt, resolveRect);
557 }
558
559 bool GrD3DGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
560 GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
561 size_t rowBytes) {
562 SkASSERT(surface);
563
564 if (surfaceColorType != dstColorType) {
565 return false;
566 }
567
568 // Set up src location and box
569 GrD3DTextureResource* texResource = nullptr;
570 GrD3DRenderTarget* rt = static_cast<GrD3DRenderTarget*>(surface->asRenderTarget());
571 if (rt) {
572 texResource = rt;
573 } else {
574 texResource = static_cast<GrD3DTexture*>(surface->asTexture());
575 }
576
577 if (!texResource) {
578 return false;
579 }
580
581 D3D12_TEXTURE_COPY_LOCATION srcLocation = {};
582 srcLocation.pResource = texResource->d3dResource();
583 SkASSERT(srcLocation.pResource);
584 srcLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
585 srcLocation.SubresourceIndex = 0;
586
587 D3D12_BOX srcBox = {};
588 srcBox.left = left;
589 srcBox.top = top;
590 srcBox.right = left + width;
591 srcBox.bottom = top + height;
592 srcBox.front = 0;
593 srcBox.back = 1;
594
595 // Set up dst location and create transfer buffer
596 D3D12_TEXTURE_COPY_LOCATION dstLocation = {};
597 dstLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
598 UINT64 transferTotalBytes;
599 const UINT64 baseOffset = 0;
600 D3D12_RESOURCE_DESC desc = srcLocation.pResource->GetDesc();
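    // Ask the device how this subresource must be laid out in a buffer (row pitch and total size)
    // so we can allocate a correctly sized transfer buffer.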
601 fDevice->GetCopyableFootprints(&desc, 0, 1, baseOffset, &dstLocation.PlacedFootprint,
602 nullptr, nullptr, &transferTotalBytes);
603 SkASSERT(transferTotalBytes);
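    // Reads are only supported when the destination color type's bytes per pixel match the
    // texture format's, since no conversion is done on the copied data.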
604 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
605 if (GrDxgiFormatBytesPerBlock(texResource->dxgiFormat()) != bpp) {
606 return false;
607 }
608 size_t tightRowBytes = bpp * width;
609
610 // TODO: implement some way of reusing buffers instead of making a new one every time.
611 sk_sp<GrGpuBuffer> transferBuffer = this->createBuffer(transferTotalBytes,
612 GrGpuBufferType::kXferGpuToCpu,
613 kDynamic_GrAccessPattern);
614 GrD3DBuffer* d3dBuf = static_cast<GrD3DBuffer*>(transferBuffer.get());
615 dstLocation.pResource = d3dBuf->d3dResource();
616
617 // Need to change the resource state to COPY_SOURCE in order to download from it
618 texResource->setResourceState(this, D3D12_RESOURCE_STATE_COPY_SOURCE);
619
620 fCurrentDirectCommandList->copyTextureRegionToBuffer(transferBuffer, &dstLocation, 0, 0,
621 texResource->resource(), &srcLocation,
622 &srcBox);
623 this->submitDirectCommandList(SyncQueue::kForce);
624
625 const void* mappedMemory = transferBuffer->map();
626
627 SkRectMemcpy(buffer, rowBytes, mappedMemory, dstLocation.PlacedFootprint.Footprint.RowPitch,
628 tightRowBytes, height);
629
630 transferBuffer->unmap();
631
632 return true;
633 }
634
635 bool GrD3DGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
636 GrColorType surfaceColorType, GrColorType srcColorType,
637 const GrMipLevel texels[], int mipLevelCount,
638 bool prepForTexSampling) {
639 GrD3DTexture* d3dTex = static_cast<GrD3DTexture*>(surface->asTexture());
640 if (!d3dTex) {
641 return false;
642 }
643
644 // Make sure we have at least the base level
645 if (!mipLevelCount || !texels[0].fPixels) {
646 return false;
647 }
648
649 SkASSERT(!GrDxgiFormatIsCompressed(d3dTex->dxgiFormat()));
650 bool success = false;
651
652 // Need to change the resource state to COPY_DEST in order to upload to it
653 d3dTex->setResourceState(this, D3D12_RESOURCE_STATE_COPY_DEST);
654
655 SkASSERT(mipLevelCount <= d3dTex->maxMipmapLevel() + 1);
656 success = this->uploadToTexture(d3dTex, left, top, width, height, srcColorType, texels,
657 mipLevelCount);
658
659 if (prepForTexSampling) {
660 d3dTex->setResourceState(this, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
661 }
662
663 return success;
664 }
665
666 bool GrD3DGpu::uploadToTexture(GrD3DTexture* tex, int left, int top, int width, int height,
667 GrColorType colorType, const GrMipLevel* texels, int mipLevelCount) {
668 SkASSERT(this->caps()->isFormatTexturable(tex->backendFormat()));
669 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
670 SkASSERT(1 == mipLevelCount ||
671 (0 == left && 0 == top && width == tex->width() && height == tex->height()));
672
673 // We assume that if the texture has mip levels, we either upload to all the levels or just the
674 // first.
675 SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->maxMipmapLevel() + 1));
676
677 if (width == 0 || height == 0) {
678 return false;
679 }
680
681 SkASSERT(this->d3dCaps().surfaceSupportsWritePixels(tex));
682 SkASSERT(this->d3dCaps().areColorTypeAndFormatCompatible(colorType, tex->backendFormat()));
683
684 ID3D12Resource* d3dResource = tex->d3dResource();
685 SkASSERT(d3dResource);
686 D3D12_RESOURCE_DESC desc = d3dResource->GetDesc();
687 // Either upload only the first miplevel or all miplevels
688 SkASSERT(1 == mipLevelCount || mipLevelCount == (int)desc.MipLevels);
689
690 if (1 == mipLevelCount && !texels[0].fPixels) {
691 return true; // no data to upload
692 }
693
694 for (int i = 0; i < mipLevelCount; ++i) {
695 // We do not allow any gaps in the mip data
696 if (!texels[i].fPixels) {
697 return false;
698 }
699 }
700
701 SkAutoTMalloc<D3D12_PLACED_SUBRESOURCE_FOOTPRINT> placedFootprints(mipLevelCount);
702 UINT64 combinedBufferSize;
703 // We reset the width and height in the description to match our subrectangle size
704 // so we don't end up allocating more space than we need.
705 desc.Width = width;
706 desc.Height = height;
707 fDevice->GetCopyableFootprints(&desc, 0, mipLevelCount, 0, placedFootprints.get(),
708 nullptr, nullptr, &combinedBufferSize);
709 size_t bpp = GrColorTypeBytesPerPixel(colorType);
710 SkASSERT(combinedBufferSize);
711
712 GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
713 combinedBufferSize, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
714 if (!slice.fBuffer) {
715 return false;
716 }
717
718 char* bufferData = (char*)slice.fOffsetMapPtr;
719
720 int currentWidth = width;
721 int currentHeight = height;
722 int layerHeight = tex->height();
723
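    // Copy each mip level's pixels into its assigned footprint within the staging buffer,
    // respecting the required row pitch.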
724 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
725 if (texels[currentMipLevel].fPixels) {
726 SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
727
728 const size_t trimRowBytes = currentWidth * bpp;
729 const size_t srcRowBytes = texels[currentMipLevel].fRowBytes;
730
731 char* dst = bufferData + placedFootprints[currentMipLevel].Offset;
732
733 // copy data into the buffer, skipping any trailing bytes
734 const char* src = (const char*)texels[currentMipLevel].fPixels;
735 SkRectMemcpy(dst, placedFootprints[currentMipLevel].Footprint.RowPitch,
736 src, srcRowBytes, trimRowBytes, currentHeight);
737 }
738 currentWidth = std::max(1, currentWidth / 2);
739 currentHeight = std::max(1, currentHeight / 2);
740 layerHeight = currentHeight;
741 }
742
743 // Update the offsets in the footprints to be relative to the slice's offset
744 for (int i = 0; i < mipLevelCount; ++i) {
745 placedFootprints[i].Offset += slice.fOffset;
746 }
747
748 ID3D12Resource* d3dBuffer = static_cast<GrD3DBuffer*>(slice.fBuffer)->d3dResource();
749 fCurrentDirectCommandList->copyBufferToTexture(d3dBuffer, tex, mipLevelCount,
750 placedFootprints.get(), left, top);
751
752 if (mipLevelCount < (int)desc.MipLevels) {
753 tex->markMipmapsDirty();
754 }
755
756 return true;
757 }
758
759 static bool check_resource_info(const GrD3DTextureResourceInfo& info) {
760 if (!info.fResource.get()) {
761 return false;
762 }
763 return true;
764 }
765
766 static bool check_tex_resource_info(const GrD3DCaps& caps, const GrD3DTextureResourceInfo& info) {
767 if (!caps.isFormatTexturable(info.fFormat)) {
768 return false;
769 }
770 // We don't support sampling from multisampled textures.
771 if (info.fSampleCount != 1) {
772 return false;
773 }
774 return true;
775 }
776
777 static bool check_rt_resource_info(const GrD3DCaps& caps, const GrD3DTextureResourceInfo& info,
778 int sampleCnt) {
779 if (!caps.isFormatRenderable(info.fFormat, sampleCnt)) {
780 return false;
781 }
782 return true;
783 }
784
785 sk_sp<GrTexture> GrD3DGpu::onWrapBackendTexture(const GrBackendTexture& tex,
786 GrWrapOwnership,
787 GrWrapCacheable wrapType,
788 GrIOType ioType) {
789 GrD3DTextureResourceInfo textureInfo;
790 if (!tex.getD3DTextureResourceInfo(&textureInfo)) {
791 return nullptr;
792 }
793
794 if (!check_resource_info(textureInfo)) {
795 return nullptr;
796 }
797
798 if (!check_tex_resource_info(this->d3dCaps(), textureInfo)) {
799 return nullptr;
800 }
801
802 // TODO: support protected context
803 if (tex.isProtected()) {
804 return nullptr;
805 }
806
807 sk_sp<GrD3DResourceState> state = tex.getGrD3DResourceState();
808 SkASSERT(state);
809 return GrD3DTexture::MakeWrappedTexture(this, tex.dimensions(), wrapType, ioType, textureInfo,
810 std::move(state));
811 }
812
813 sk_sp<GrTexture> GrD3DGpu::onWrapCompressedBackendTexture(const GrBackendTexture& tex,
814 GrWrapOwnership ownership,
815 GrWrapCacheable wrapType) {
816 return this->onWrapBackendTexture(tex, ownership, wrapType, kRead_GrIOType);
817 }
818
819 sk_sp<GrTexture> GrD3DGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
820 int sampleCnt,
821 GrWrapOwnership ownership,
822 GrWrapCacheable cacheable) {
823 GrD3DTextureResourceInfo textureInfo;
824 if (!tex.getD3DTextureResourceInfo(&textureInfo)) {
825 return nullptr;
826 }
827
828 if (!check_resource_info(textureInfo)) {
829 return nullptr;
830 }
831
832 if (!check_tex_resource_info(this->d3dCaps(), textureInfo)) {
833 return nullptr;
834 }
835 if (!check_rt_resource_info(this->d3dCaps(), textureInfo, sampleCnt)) {
836 return nullptr;
837 }
838
839 // TODO: support protected context
840 if (tex.isProtected()) {
841 return nullptr;
842 }
843
844 sampleCnt = this->d3dCaps().getRenderTargetSampleCount(sampleCnt, textureInfo.fFormat);
845
846 sk_sp<GrD3DResourceState> state = tex.getGrD3DResourceState();
847 SkASSERT(state);
848
849 return GrD3DTextureRenderTarget::MakeWrappedTextureRenderTarget(this, tex.dimensions(),
850 sampleCnt, cacheable,
851 textureInfo, std::move(state));
852 }
853
854 sk_sp<GrRenderTarget> GrD3DGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt) {
855 GrD3DTextureResourceInfo info;
856 if (!rt.getD3DTextureResourceInfo(&info)) {
857 return nullptr;
858 }
859
860 if (!check_resource_info(info)) {
861 return nullptr;
862 }
863
864 if (!check_rt_resource_info(this->d3dCaps(), info, rt.sampleCnt())) {
865 return nullptr;
866 }
867
868 // TODO: support protected context
869 if (rt.isProtected()) {
870 return nullptr;
871 }
872
873 sk_sp<GrD3DResourceState> state = rt.getGrD3DResourceState();
874
875 sk_sp<GrD3DRenderTarget> tgt = GrD3DRenderTarget::MakeWrappedRenderTarget(
876 this, rt.dimensions(), rt.sampleCnt(), info, std::move(state));
877
878 // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
879 SkASSERT(!rt.stencilBits());
880 if (tgt) {
881 SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
882 }
883
884 return std::move(tgt);
885 }
886
887 static bool is_odd(int x) {
888 return x > 1 && SkToBool(x & 0x1);
889 }
890
891 bool GrD3DGpu::onRegenerateMipMapLevels(GrTexture* tex) {
892     auto* d3dTex = static_cast<GrD3DTexture*>(tex);
893 SkASSERT(tex->textureType() == GrTextureType::k2D);
894 int width = tex->width();
895 int height = tex->height();
896
897 // determine if we can read from and mipmap this format
898     const GrD3DCaps& caps = this->d3dCaps();
899 if (!caps.isFormatTexturable(d3dTex->dxgiFormat()) ||
900 !caps.mipmapSupport()) {
901 return false;
902 }
903
904 sk_sp<GrD3DTexture> uavTexture;
905 // if the format is unordered accessible and resource flag is set, use resource for uav
906 if (caps.isFormatUnorderedAccessible(d3dTex->dxgiFormat()) &&
907 (d3dTex->d3dResource()->GetDesc().Flags & D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS)) {
908 uavTexture = sk_ref_sp(d3dTex);
909 } else {
910 // need to make a copy and use that for our uav
911 D3D12_RESOURCE_DESC uavDesc = d3dTex->d3dResource()->GetDesc();
912 uavDesc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
913 // if the format is unordered accessible, copy to resource with same format and flag set
914 if (!caps.isFormatUnorderedAccessible(d3dTex->dxgiFormat())) {
915 // TODO: support BGR and sRGB
916 return false;
917 }
918 // TODO: make this a scratch texture
919 GrProtected grProtected = tex->isProtected() ? GrProtected::kYes : GrProtected::kNo;
920 uavTexture = GrD3DTexture::MakeNewTexture(this, SkBudgeted::kNo, tex->dimensions(),
921 uavDesc, grProtected, GrMipmapStatus::kDirty);
922 if (!uavTexture) {
923 return false;
924 }
925
926 d3dTex->setResourceState(this, D3D12_RESOURCE_STATE_COPY_SOURCE);
927 // copy top miplevel to uavTexture
928 uavTexture->setResourceState(this, D3D12_RESOURCE_STATE_COPY_DEST);
929 this->currentCommandList()->copyTextureToTexture(uavTexture.get(), d3dTex, 0);
930 }
931
932 uint32_t levelCount = d3dTex->mipLevels();
933 // SkMipmap doesn't include the base level in the level count so we have to add 1
934 SkASSERT((int)levelCount == SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1);
935
936 sk_sp<GrD3DRootSignature> rootSig = fResourceProvider.findOrCreateRootSignature(1, 1);
937 this->currentCommandList()->setComputeRootSignature(rootSig);
938
939 // TODO: use linear vs. srgb shader based on texture format
940 sk_sp<GrD3DPipeline> pipeline = this->resourceProvider().findOrCreateMipmapPipeline();
941 SkASSERT(pipeline);
942 this->currentCommandList()->setPipelineState(std::move(pipeline));
943
944 // set sampler
945 GrSamplerState samplerState(SkFilterMode::kLinear, SkMipmapMode::kNearest);
946 std::vector<D3D12_CPU_DESCRIPTOR_HANDLE> samplers(1);
947 samplers[0] = fResourceProvider.findOrCreateCompatibleSampler(samplerState);
948 this->currentCommandList()->addSampledTextureRef(uavTexture.get());
949 sk_sp<GrD3DDescriptorTable> samplerTable = fResourceProvider.findOrCreateSamplerTable(samplers);
950 this->currentCommandList()->setComputeRootDescriptorTable(
951 static_cast<unsigned int>(GrD3DRootSignature::ParamIndex::kSamplerDescriptorTable),
952 samplerTable->baseGpuDescriptor());
953
954 // Transition the top subresource to be readable in the compute shader
955 D3D12_RESOURCE_STATES currentResourceState = uavTexture->currentState();
956 D3D12_RESOURCE_TRANSITION_BARRIER barrier;
957 barrier.pResource = uavTexture->d3dResource();
958 barrier.Subresource = 0;
959 barrier.StateBefore = currentResourceState;
960 barrier.StateAfter = D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
961 this->addResourceBarriers(uavTexture->resource(), 1, &barrier);
962
963 // Generate the miplevels
964 for (unsigned int dstMip = 1; dstMip < levelCount; ++dstMip) {
965 unsigned int srcMip = dstMip - 1;
966 width = std::max(1, width / 2);
967 height = std::max(1, height / 2);
968
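        // sampleMode is passed to the compute shader to indicate which of the destination level's
        // dimensions are odd: 0 = neither, 1 = both, 2 = width only, 3 = height only.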
969 unsigned int sampleMode = 0;
970 if (is_odd(width) && is_odd(height)) {
971 sampleMode = 1;
972 } else if (is_odd(width)) {
973 sampleMode = 2;
974 } else if (is_odd(height)) {
975 sampleMode = 3;
976 }
977
978 // set constants
979 struct {
980 SkSize inverseSize;
981 uint32_t mipLevel;
982 uint32_t sampleMode;
983 } constantData = { {1.f / width, 1.f / height}, srcMip, sampleMode };
984
985 D3D12_GPU_VIRTUAL_ADDRESS constantsAddress =
986 fResourceProvider.uploadConstantData(&constantData, sizeof(constantData));
987 this->currentCommandList()->setComputeRootConstantBufferView(
988 (unsigned int)GrD3DRootSignature::ParamIndex::kConstantBufferView,
989 constantsAddress);
990
991 std::vector<D3D12_CPU_DESCRIPTOR_HANDLE> shaderViews;
992 // create SRV
993 GrD3DDescriptorHeap::CPUHandle srvHandle =
994 fResourceProvider.createShaderResourceView(uavTexture->d3dResource(), srcMip, 1);
995 shaderViews.push_back(srvHandle.fHandle);
996 fMipmapCPUDescriptors.push_back(srvHandle);
997 // create UAV
998 GrD3DDescriptorHeap::CPUHandle uavHandle =
999 fResourceProvider.createUnorderedAccessView(uavTexture->d3dResource(), dstMip);
1000 shaderViews.push_back(uavHandle.fHandle);
1001 fMipmapCPUDescriptors.push_back(uavHandle);
1002
1003 // set up and bind shaderView descriptor table
1004 sk_sp<GrD3DDescriptorTable> srvTable =
1005 fResourceProvider.findOrCreateShaderViewTable(shaderViews);
1006 this->currentCommandList()->setComputeRootDescriptorTable(
1007 (unsigned int)GrD3DRootSignature::ParamIndex::kShaderViewDescriptorTable,
1008 srvTable->baseGpuDescriptor());
1009
1010 // Transition resource state of dstMip subresource so we can write to it
1011 barrier.Subresource = dstMip;
1012 barrier.StateBefore = currentResourceState;
1013 barrier.StateAfter = D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
1014 this->addResourceBarriers(uavTexture->resource(), 1, &barrier);
1015
1016 // Using the form (x+7)/8 ensures that the remainder is covered as well
1017 this->currentCommandList()->dispatch((width+7)/8, (height+7)/8);
1018
1019 // guarantee UAV writes have completed
1020 this->currentCommandList()->uavBarrier(uavTexture->resource(), uavTexture->d3dResource());
1021
1022 // Transition resource state of dstMip subresource so we can read it in the next stage
1023 barrier.StateBefore = D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
1024 barrier.StateAfter = D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
1025 this->addResourceBarriers(uavTexture->resource(), 1, &barrier);
1026 }
1027
1028 // copy back if necessary
1029 if (uavTexture.get() != d3dTex) {
1030 d3dTex->setResourceState(this, D3D12_RESOURCE_STATE_COPY_DEST);
1031 barrier.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
1032 barrier.StateBefore = D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
1033 barrier.StateAfter = D3D12_RESOURCE_STATE_COPY_SOURCE;
1034 // TODO: support BGR and sRGB
1035 this->addResourceBarriers(uavTexture->resource(), 1, &barrier);
1036 this->currentCommandList()->copyTextureToTexture(d3dTex, uavTexture.get());
1037 } else {
1038 // For simplicity our resource state tracking considers all subresources to have the same
1039 // state. However, we've changed that state one subresource at a time without going through
1040 // the tracking system, so we need to patch up the resource states back to the original.
1041 barrier.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
1042 barrier.StateBefore = D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
1043 barrier.StateAfter = currentResourceState;
1044 this->addResourceBarriers(d3dTex->resource(), 1, &barrier);
1045 }
1046
1047 return true;
1048 }
1049
1050 sk_sp<GrGpuBuffer> GrD3DGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type,
1051 GrAccessPattern accessPattern, const void* data) {
1052 sk_sp<GrD3DBuffer> buffer = GrD3DBuffer::Make(this, sizeInBytes, type, accessPattern);
1053 if (data && buffer) {
1054 buffer->updateData(data, sizeInBytes);
1055 }
1056
1057 return std::move(buffer);
1058 }
1059
1060 sk_sp<GrAttachment> GrD3DGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1061 SkISize dimensions, int numStencilSamples) {
1062 DXGI_FORMAT sFmt = this->d3dCaps().preferredStencilFormat();
1063
1064 fStats.incStencilAttachmentCreates();
1065 return GrD3DAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1066 }
1067
1068 bool GrD3DGpu::createTextureResourceForBackendSurface(DXGI_FORMAT dxgiFormat,
1069 SkISize dimensions,
1070 GrTexturable texturable,
1071 GrRenderable renderable,
1072 GrMipmapped mipMapped,
1073 int sampleCnt,
1074 GrD3DTextureResourceInfo* info,
1075 GrProtected isProtected) {
1076 SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1077
1078 if (this->protectedContext() != (isProtected == GrProtected::kYes)) {
1079 return false;
1080 }
1081
1082 if (texturable == GrTexturable::kYes && !this->d3dCaps().isFormatTexturable(dxgiFormat)) {
1083 return false;
1084 }
1085
1086 if (renderable == GrRenderable::kYes && !this->d3dCaps().isFormatRenderable(dxgiFormat, 1)) {
1087 return false;
1088 }
1089
1090 int numMipLevels = 1;
1091 if (mipMapped == GrMipmapped::kYes) {
1092 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1093 }
1094
1095 // create the texture
1096 D3D12_RESOURCE_FLAGS usageFlags = D3D12_RESOURCE_FLAG_NONE;
1097 if (renderable == GrRenderable::kYes) {
1098 usageFlags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
1099 }
1100
1101 D3D12_RESOURCE_DESC resourceDesc = {};
1102 resourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
1103 resourceDesc.Alignment = 0; // use default alignment
1104 resourceDesc.Width = dimensions.fWidth;
1105 resourceDesc.Height = dimensions.fHeight;
1106 resourceDesc.DepthOrArraySize = 1;
1107 resourceDesc.MipLevels = numMipLevels;
1108 resourceDesc.Format = dxgiFormat;
1109 resourceDesc.SampleDesc.Count = sampleCnt;
1110 resourceDesc.SampleDesc.Quality = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
1111 resourceDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN; // use driver-selected swizzle
1112 resourceDesc.Flags = usageFlags;
1113
1114 D3D12_CLEAR_VALUE* clearValuePtr = nullptr;
1115 D3D12_CLEAR_VALUE clearValue = {};
1116 if (renderable == GrRenderable::kYes) {
1117 clearValue.Format = dxgiFormat;
1118 // Assume transparent black
1119 clearValue.Color[0] = 0;
1120 clearValue.Color[1] = 0;
1121 clearValue.Color[2] = 0;
1122 clearValue.Color[3] = 0;
1123 clearValuePtr = &clearValue;
1124 }
1125
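    // Renderable textures start out in the RENDER_TARGET state; everything else starts in
    // COPY_DEST so that initial pixel data can be uploaded to it.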
1126 D3D12_RESOURCE_STATES initialState = (renderable == GrRenderable::kYes)
1127 ? D3D12_RESOURCE_STATE_RENDER_TARGET
1128 : D3D12_RESOURCE_STATE_COPY_DEST;
1129 if (!GrD3DTextureResource::InitTextureResourceInfo(this, resourceDesc, initialState,
1130 isProtected, clearValuePtr, info)) {
1131 SkDebugf("Failed to init texture resource info\n");
1132 return false;
1133 }
1134
1135 return true;
1136 }
1137
1138 GrBackendTexture GrD3DGpu::onCreateBackendTexture(SkISize dimensions,
1139 const GrBackendFormat& format,
1140 GrRenderable renderable,
1141 GrMipmapped mipMapped,
1142 GrProtected isProtected) {
1143 const GrD3DCaps& caps = this->d3dCaps();
1144
1145 if (this->protectedContext() != (isProtected == GrProtected::kYes)) {
1146 return {};
1147 }
1148
1149 DXGI_FORMAT dxgiFormat;
1150 if (!format.asDxgiFormat(&dxgiFormat)) {
1151 return {};
1152 }
1153
1154 // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1155 if (!caps.isFormatTexturable(dxgiFormat)) {
1156 return {};
1157 }
1158
1159 GrD3DTextureResourceInfo info;
1160 if (!this->createTextureResourceForBackendSurface(dxgiFormat, dimensions, GrTexturable::kYes,
1161 renderable, mipMapped, 1, &info,
1162 isProtected)) {
1163 return {};
1164 }
1165
1166 return GrBackendTexture(dimensions.width(), dimensions.height(), info);
1167 }
1168
1169 static bool copy_color_data(const GrD3DCaps& caps,
1170 char* mapPtr,
1171 DXGI_FORMAT dxgiFormat,
1172 SkISize dimensions,
1173 D3D12_PLACED_SUBRESOURCE_FOOTPRINT* placedFootprints,
1174 std::array<float, 4> color) {
1175 auto colorType = caps.getFormatColorType(dxgiFormat);
1176 if (colorType == GrColorType::kUnknown) {
1177 return false;
1178 }
1179 GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, dimensions);
1180 if (!GrClearImage(ii, mapPtr, placedFootprints[0].Footprint.RowPitch, color)) {
1181 return false;
1182 }
1183
1184 return true;
1185 }
1186
1187 bool GrD3DGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1188 sk_sp<GrRefCntedCallback> finishedCallback,
1189 std::array<float, 4> color) {
1190 GrD3DTextureResourceInfo info;
1191 SkAssertResult(backendTexture.getD3DTextureResourceInfo(&info));
1192 SkASSERT(!GrDxgiFormatIsCompressed(info.fFormat));
1193
1194 sk_sp<GrD3DResourceState> state = backendTexture.getGrD3DResourceState();
1195 SkASSERT(state);
1196 sk_sp<GrD3DTexture> texture =
1197 GrD3DTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1198 GrWrapCacheable::kNo,
1199 kRW_GrIOType, info, std::move(state));
1200 if (!texture) {
1201 return false;
1202 }
1203
1204 GrD3DDirectCommandList* cmdList = this->currentCommandList();
1205 if (!cmdList) {
1206 return false;
1207 }
1208
1209 texture->setResourceState(this, D3D12_RESOURCE_STATE_COPY_DEST);
1210
1211 ID3D12Resource* d3dResource = texture->d3dResource();
1212 SkASSERT(d3dResource);
1213 D3D12_RESOURCE_DESC desc = d3dResource->GetDesc();
1214 unsigned int mipLevelCount = 1;
1215 if (backendTexture.fMipmapped == GrMipmapped::kYes) {
1216 mipLevelCount = SkMipmap::ComputeLevelCount(backendTexture.dimensions()) + 1;
1217 }
1218 SkASSERT(mipLevelCount == info.fLevelCount);
1219 SkAutoSTMalloc<15, D3D12_PLACED_SUBRESOURCE_FOOTPRINT> placedFootprints(mipLevelCount);
1220 UINT numRows;
1221 UINT64 rowSizeInBytes;
1222 UINT64 combinedBufferSize;
1223 // We reuse the same top-level buffer area for all levels, hence passing 1 for level count.
1224 fDevice->GetCopyableFootprints(&desc,
1225 /* first resource */ 0,
1226 /* mip level count */ 1,
1227 /* base offset */ 0,
1228 placedFootprints.get(),
1229 &numRows,
1230 &rowSizeInBytes,
1231 &combinedBufferSize);
1232 SkASSERT(combinedBufferSize);
1233
1234 GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
1235 combinedBufferSize, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
1236 if (!slice.fBuffer) {
1237 return false;
1238 }
1239
1240 char* bufferData = (char*)slice.fOffsetMapPtr;
1241 SkASSERT(bufferData);
1242 if (!copy_color_data(this->d3dCaps(),
1243 bufferData,
1244 info.fFormat,
1245 backendTexture.dimensions(),
1246 placedFootprints,
1247 color)) {
1248 return false;
1249 }
1250 // Update the offsets in the footprint to be relative to the slice's offset
1251 placedFootprints[0].Offset += slice.fOffset;
1252 // Since we're sharing data for all the levels, set all the upper level footprints to the base.
1253 UINT w = placedFootprints[0].Footprint.Width;
1254 UINT h = placedFootprints[0].Footprint.Height;
1255 for (unsigned int i = 1; i < mipLevelCount; ++i) {
1256 w = std::max(1U, w/2);
1257 h = std::max(1U, h/2);
1258 placedFootprints[i].Offset = placedFootprints[0].Offset;
1259 placedFootprints[i].Footprint.Format = placedFootprints[0].Footprint.Format;
1260 placedFootprints[i].Footprint.Width = w;
1261 placedFootprints[i].Footprint.Height = h;
1262 placedFootprints[i].Footprint.Depth = 1;
1263 placedFootprints[i].Footprint.RowPitch = placedFootprints[0].Footprint.RowPitch;
1264 }
1265
1266 ID3D12Resource* d3dBuffer = static_cast<GrD3DBuffer*>(slice.fBuffer)->d3dResource();
1267 cmdList->copyBufferToTexture(d3dBuffer,
1268 texture.get(),
1269 mipLevelCount,
1270 placedFootprints.get(),
1271 /*left*/ 0,
1272 /*top */ 0);
1273
1274 if (finishedCallback) {
1275 this->addFinishedCallback(std::move(finishedCallback));
1276 }
1277
1278 return true;
1279 }
1280
1281 GrBackendTexture GrD3DGpu::onCreateCompressedBackendTexture(
1282 SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
1283 GrProtected isProtected) {
1284 return this->onCreateBackendTexture(dimensions, format, GrRenderable::kNo, mipMapped,
1285 isProtected);
1286 }
1287
1288 bool GrD3DGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1289 sk_sp<GrRefCntedCallback> finishedCallback,
1290 const void* data,
1291 size_t size) {
1292 GrD3DTextureResourceInfo info;
1293 SkAssertResult(backendTexture.getD3DTextureResourceInfo(&info));
1294
1295 sk_sp<GrD3DResourceState> state = backendTexture.getGrD3DResourceState();
1296 SkASSERT(state);
1297 sk_sp<GrD3DTexture> texture = GrD3DTexture::MakeWrappedTexture(this,
1298 backendTexture.dimensions(),
1299 GrWrapCacheable::kNo,
1300 kRW_GrIOType,
1301 info,
1302 std::move(state));
1303 if (!texture) {
1304 return false;
1305 }
1306
1307 GrD3DDirectCommandList* cmdList = this->currentCommandList();
1308 if (!cmdList) {
1309 return false;
1310 }
1311
1312 texture->setResourceState(this, D3D12_RESOURCE_STATE_COPY_DEST);
1313
1314 ID3D12Resource* d3dResource = texture->d3dResource();
1315 SkASSERT(d3dResource);
1316 D3D12_RESOURCE_DESC desc = d3dResource->GetDesc();
1317 unsigned int mipLevelCount = 1;
1318 if (backendTexture.hasMipmaps()) {
1319 mipLevelCount = SkMipmap::ComputeLevelCount(backendTexture.dimensions().width(),
1320 backendTexture.dimensions().height()) + 1;
1321 }
1322 SkASSERT(mipLevelCount == info.fLevelCount);
1323 SkAutoTMalloc<D3D12_PLACED_SUBRESOURCE_FOOTPRINT> placedFootprints(mipLevelCount);
1324 UINT64 combinedBufferSize;
1325 SkAutoTMalloc<UINT> numRows(mipLevelCount);
1326 SkAutoTMalloc<UINT64> rowSizeInBytes(mipLevelCount);
1327 fDevice->GetCopyableFootprints(&desc,
1328 0,
1329 mipLevelCount,
1330 0,
1331 placedFootprints.get(),
1332 numRows.get(),
1333 rowSizeInBytes.get(),
1334 &combinedBufferSize);
1335 SkASSERT(combinedBufferSize);
1336 SkASSERT(GrDxgiFormatIsCompressed(info.fFormat));
1337
1338 GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
1339 combinedBufferSize, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
1340 if (!slice.fBuffer) {
1341 return false;
1342 }
1343
1344 char* bufferData = (char*)slice.fOffsetMapPtr;
1345 SkASSERT(bufferData);
1346 copy_compressed_data(bufferData,
1347 info.fFormat,
1348 placedFootprints.get(),
1349 numRows.get(),
1350 rowSizeInBytes.get(),
1351 data,
1352 info.fLevelCount);
1353
1354 // Update the offsets in the footprints to be relative to the slice's offset
1355 for (unsigned int i = 0; i < mipLevelCount; ++i) {
1356 placedFootprints[i].Offset += slice.fOffset;
1357 }
1358
1359 ID3D12Resource* d3dBuffer = static_cast<GrD3DBuffer*>(slice.fBuffer)->d3dResource();
1360 cmdList->copyBufferToTexture(d3dBuffer,
1361 texture.get(),
1362 mipLevelCount,
1363 placedFootprints.get(),
1364 0,
1365 0);
1366
1367 if (finishedCallback) {
1368 this->addFinishedCallback(std::move(finishedCallback));
1369 }
1370
1371 return true;
1372 }
1373
1374 void GrD3DGpu::deleteBackendTexture(const GrBackendTexture& tex) {
1375 SkASSERT(GrBackendApi::kDirect3D == tex.fBackend);
1376 // Nothing to do here, will get cleaned up when the GrBackendTexture object goes away
1377 }
1378
1379 bool GrD3DGpu::compile(const GrProgramDesc&, const GrProgramInfo&) {
1380 return false;
1381 }
1382
1383 #if GR_TEST_UTILS
1384 bool GrD3DGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
1385 SkASSERT(GrBackendApi::kDirect3D == tex.backend());
1386
1387 GrD3DTextureResourceInfo info;
1388 if (!tex.getD3DTextureResourceInfo(&info)) {
1389 return false;
1390 }
1391 ID3D12Resource* textureResource = info.fResource.get();
1392 if (!textureResource) {
1393 return false;
1394 }
1395 return !(textureResource->GetDesc().Flags & D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE);
1396 }
1397
1398 GrBackendRenderTarget GrD3DGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
1399 GrColorType colorType,
1400 int sampleCnt,
1401 GrProtected isProtected) {
1402 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
1403 dimensions.height() > this->caps()->maxRenderTargetSize()) {
1404 return {};
1405 }
1406
1407 DXGI_FORMAT dxgiFormat = this->d3dCaps().getFormatFromColorType(colorType);
1408
1409 GrD3DTextureResourceInfo info;
1410 if (!this->createTextureResourceForBackendSurface(dxgiFormat, dimensions, GrTexturable::kNo,
1411 GrRenderable::kYes, GrMipmapped::kNo,
1412 sampleCnt, &info, isProtected)) {
1413 return {};
1414 }
1415
1416 return GrBackendRenderTarget(dimensions.width(), dimensions.height(), info);
1417 }
1418
1419 void GrD3DGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
1420 SkASSERT(GrBackendApi::kDirect3D == rt.backend());
1421
1422 GrD3DTextureResourceInfo info;
1423 if (rt.getD3DTextureResourceInfo(&info)) {
1424 this->submitToGpu(true);
1425 // Nothing else to do here, will get cleaned up when the GrBackendRenderTarget
1426 // is deleted.
1427 }
1428 }
1429
1430 void GrD3DGpu::testingOnly_startCapture() {
1431 if (fGraphicsAnalysis) {
1432 fGraphicsAnalysis->BeginCapture();
1433 }
1434 }
1435
1436 void GrD3DGpu::testingOnly_endCapture() {
1437 if (fGraphicsAnalysis) {
1438 fGraphicsAnalysis->EndCapture();
1439 }
1440 }
1441 #endif
1442
1443 ///////////////////////////////////////////////////////////////////////////////
1444
1445 void GrD3DGpu::addResourceBarriers(sk_sp<GrManagedResource> resource,
1446 int numBarriers,
1447 D3D12_RESOURCE_TRANSITION_BARRIER* barriers) const {
1448 SkASSERT(fCurrentDirectCommandList);
1449 SkASSERT(resource);
1450
1451 fCurrentDirectCommandList->resourceBarrier(std::move(resource), numBarriers, barriers);
1452 }
1453
1454 void GrD3DGpu::addBufferResourceBarriers(GrD3DBuffer* buffer,
1455 int numBarriers,
1456 D3D12_RESOURCE_TRANSITION_BARRIER* barriers) const {
1457 SkASSERT(fCurrentDirectCommandList);
1458 SkASSERT(buffer);
1459
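    // The barrier call doesn't need to hold a managed resource here, but we do track the buffer
    // on the command list so it stays alive until the GPU is done with it.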
1460 fCurrentDirectCommandList->resourceBarrier(nullptr, numBarriers, barriers);
1461 fCurrentDirectCommandList->addGrBuffer(sk_ref_sp<const GrBuffer>(buffer));
1462 }
1463
1464
1465 void GrD3DGpu::prepareSurfacesForBackendAccessAndStateUpdates(
1466 SkSpan<GrSurfaceProxy*> proxies,
1467 SkSurface::BackendSurfaceAccess access,
1468 const GrBackendSurfaceMutableState* newState) {
1469     // Prepare the proxies by transitioning them to the PRESENT state.
1470 if (!proxies.empty() && access == SkSurface::BackendSurfaceAccess::kPresent) {
1471 GrD3DTextureResource* resource;
1472 for (GrSurfaceProxy* proxy : proxies) {
1473 SkASSERT(proxy->isInstantiated());
1474 if (GrTexture* tex = proxy->peekTexture()) {
1475 resource = static_cast<GrD3DTexture*>(tex);
1476 } else {
1477 GrRenderTarget* rt = proxy->peekRenderTarget();
1478 SkASSERT(rt);
1479 resource = static_cast<GrD3DRenderTarget*>(rt);
1480 }
1481 resource->prepareForPresent(this);
1482 }
1483 }
1484 }
1485
1486 void GrD3DGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
1487 fCurrentDirectCommandList->addGrBuffer(std::move(buffer));
1488 }
1489
1490 bool GrD3DGpu::onSubmitToGpu(bool syncCpu) {
1491 if (syncCpu) {
1492 return this->submitDirectCommandList(SyncQueue::kForce);
1493 } else {
1494 return this->submitDirectCommandList(SyncQueue::kSkip);
1495 }
1496 }
1497
1498 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrD3DGpu::makeSemaphore(bool) {
1499 return GrD3DSemaphore::Make(this);
1500 }
1501 std::unique_ptr<GrSemaphore> GrD3DGpu::wrapBackendSemaphore(
1502 const GrBackendSemaphore& semaphore,
1503 GrResourceProvider::SemaphoreWrapType,
1504 GrWrapOwnership) {
1505 SkASSERT(this->caps()->semaphoreSupport());
1506 GrD3DFenceInfo fenceInfo;
1507 if (!semaphore.getD3DFenceInfo(&fenceInfo)) {
1508 return nullptr;
1509 }
1510 return GrD3DSemaphore::MakeWrapped(fenceInfo);
1511 }
1512
1513 void GrD3DGpu::insertSemaphore(GrSemaphore* semaphore) {
1514 SkASSERT(semaphore);
1515 GrD3DSemaphore* d3dSem = static_cast<GrD3DSemaphore*>(semaphore);
1516 // TODO: Do we need to track the lifetime of this? How do we know it's done?
1517 fQueue->Signal(d3dSem->fence(), d3dSem->value());
1518 }
1519
1520 void GrD3DGpu::waitSemaphore(GrSemaphore* semaphore) {
1521 SkASSERT(semaphore);
1522 GrD3DSemaphore* d3dSem = static_cast<GrD3DSemaphore*>(semaphore);
1523 // TODO: Do we need to track the lifetime of this?
1524 fQueue->Wait(d3dSem->fence(), d3dSem->value());
1525 }
1526
1527 GrFence SK_WARN_UNUSED_RESULT GrD3DGpu::insertFence() {
1528 GR_D3D_CALL_ERRCHECK(fQueue->Signal(fFence.get(), ++fCurrentFenceValue));
1529 return fCurrentFenceValue;
1530 }
1531
1532 bool GrD3DGpu::waitFence(GrFence fence) {
1533 return (fFence->GetCompletedValue() >= fence);
1534 }
1535
1536 void GrD3DGpu::finishOutstandingGpuWork() {
1537 this->waitForQueueCompletion();
1538 }
1539