/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrGpu.h"

#include "GrBackendSemaphore.h"
#include "GrBackendSurface.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpuResourcePriv.h"
#include "GrMesh.h"
#include "GrPathRendering.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "GrSemaphore.h"
#include "GrStencilAttachment.h"
#include "GrStencilSettings.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"
#include "GrTextureProxyPriv.h"
#include "GrTracing.h"
#include "SkJSONWriter.h"
#include "SkMathPriv.h"

////////////////////////////////////////////////////////////////////////////////

GrGpu::GrGpu(GrContext* context)
        : fResetTimestamp(kExpiredTimestamp+1)
        , fResetBits(kAll_GrBackendState)
        , fContext(context) {
}

GrGpu::~GrGpu() {}

void GrGpu::disconnect(DisconnectType) {}

////////////////////////////////////////////////////////////////////////////////

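// Returns true (and fills in copyParams) if the proxy must be copied before it can be sampled
// with a repeat wrap mode: either the texture is NPOT on hardware without NPOT tiling support,
// or the format only supports restricted sampling.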
bool GrGpu::IsACopyNeededForRepeatWrapMode(const GrCaps* caps, GrTextureProxy* texProxy,
                                           int width, int height,
                                           GrSamplerState::Filter filter,
                                           GrTextureProducer::CopyParams* copyParams,
                                           SkScalar scaleAdjust[2]) {
    if (!caps->npotTextureTileSupport() &&
        (!SkIsPow2(width) || !SkIsPow2(height))) {
        SkASSERT(scaleAdjust);
        copyParams->fWidth = GrNextPow2(width);
        copyParams->fHeight = GrNextPow2(height);
        scaleAdjust[0] = ((SkScalar)copyParams->fWidth) / width;
        scaleAdjust[1] = ((SkScalar)copyParams->fHeight) / height;
        switch (filter) {
            case GrSamplerState::Filter::kNearest:
                copyParams->fFilter = GrSamplerState::Filter::kNearest;
                break;
            case GrSamplerState::Filter::kBilerp:
            case GrSamplerState::Filter::kMipMap:
                // We are only ever scaling up so there is no reason to ever indicate kMipMap.
                copyParams->fFilter = GrSamplerState::Filter::kBilerp;
                break;
        }
        return true;
    }

    if (texProxy) {
        // If the texture format itself doesn't support repeat wrap mode or mipmapping (and
        // those capabilities are required) force a copy.
        if (texProxy->hasRestrictedSampling()) {
            copyParams->fFilter = GrSamplerState::Filter::kNearest;
            copyParams->fWidth = texProxy->width();
            copyParams->fHeight = texProxy->height();
            return true;
        }
    }

    return false;
}

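// Returns true (and fills in copyParams) if mipmapped filtering was requested but the proxy
// has no mip levels allocated, so a mipmapped copy must be made.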
bool GrGpu::IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
                                 GrSamplerState::Filter filter,
                                 GrTextureProducer::CopyParams* copyParams) {
    SkASSERT(texProxy);
    bool willNeedMips = GrSamplerState::Filter::kMipMap == filter && caps->mipMapSupport();
    // If the texture itself doesn't support mipmapping (and mipmapping is required) force a copy.
    if (willNeedMips && texProxy->mipMapped() == GrMipMapped::kNo) {
        copyParams->fFilter = GrSamplerState::Filter::kNearest;
        copyParams->fWidth = texProxy->width();
        copyParams->fHeight = texProxy->height();
        return true;
    }

    return false;
}

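// Validates the descriptor against the caps, normalizes the sample count for render targets,
// then defers to the backend-specific onCreateTexture().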
sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budgeted,
                                      const GrMipLevel texels[], int mipLevelCount) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrGpu", "createTexture", fContext);
    GrSurfaceDesc desc = origDesc;

    GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
    if (!this->caps()->validateSurfaceDesc(desc, mipMapped)) {
        return nullptr;
    }

    bool isRT = desc.fFlags & kRenderTarget_GrSurfaceFlag;
    if (isRT) {
        desc.fSampleCnt = this->caps()->getRenderTargetSampleCount(desc.fSampleCnt, desc.fConfig);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(desc.fSampleCnt > 0 && desc.fSampleCnt <= 64);

    if (mipLevelCount && (desc.fFlags & kPerformInitialClear_GrSurfaceFlag)) {
        return nullptr;
    }

    // We shouldn't be rendering into compressed textures
    SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig) || !isRT);
    SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig) || 1 == desc.fSampleCnt);

    this->handleDirtyContext();
    sk_sp<GrTexture> tex = this->onCreateTexture(desc, budgeted, texels, mipLevelCount);
    if (tex) {
        if (!this->caps()->reuseScratchTextures() && !isRT) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (mipLevelCount) {
            if (texels[0].fPixels) {
                fStats.incTextureUploads();
            }
        }
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted) {
    return this->createTexture(desc, budgeted, nullptr, 0);
}

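// Wraps a client-created backend texture for sampling only (use wrapRenderableBackendTexture()
// for rendering). Fails if the config is not texturable or the dimensions exceed the caps.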
sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrWrapOwnership ownership, GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();
    SkASSERT(this->caps());
    if (!this->caps()->isConfigTexturable(backendTex.config())) {
        return nullptr;
    }
    if (backendTex.width() > this->caps()->maxTextureSize() ||
        backendTex.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
}

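// As above, but the wrapped texture must also be renderable at the requested sample count.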
sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt, GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }
    if (!this->caps()->isConfigTexturable(backendTex.config()) ||
        !this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config())) {
        return nullptr;
    }

    if (backendTex.width() > this->caps()->maxRenderTargetSize() ||
        backendTex.height() > this->caps()->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex =
            this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    return tex;
}

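// Wraps a client-created backend render target; rejects sample count/config combinations the
// backend cannot render to.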
sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    if (0 == this->caps()->getRenderTargetSampleCount(backendRT.sampleCnt(), backendRT.config())) {
        return nullptr;
    }
    this->handleDirtyContext();
    return this->onWrapBackendRenderTarget(backendRT);
}

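// Wraps a backend texture so that it is used purely as a render target.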
sk_sp<GrRenderTarget> GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
                                                              int sampleCnt) {
    if (0 == this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config())) {
        return nullptr;
    }
    int maxSize = this->caps()->maxTextureSize();
    if (tex.width() > maxSize || tex.height() > maxSize) {
        return nullptr;
    }
    this->handleDirtyContext();
    return this->onWrapBackendTextureAsRenderTarget(tex, sampleCnt);
}

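// Wraps an externally provided Vulkan secondary command buffer as a render target. Only the
// Vulkan backend overrides the on-virtual; every other backend returns nullptr.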
sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan, so we default to returning nullptr here.
    return nullptr;
}

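// Creates a GPU buffer of the given size and intended use; strips the scratch key when the caps
// forbid reusing scratch buffers, so the buffer is never recycled from the resource cache.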
sk_sp<GrBuffer> GrGpu::createBuffer(size_t size, GrBufferType intendedType,
                                    GrAccessPattern accessPattern, const void* data) {
    this->handleDirtyContext();
    sk_sp<GrBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
    // onCreateBuffer() may fail; only strip the scratch key from a live buffer.
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

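// Copies srcRect from src into dst at dstPoint, failing for read-only destinations and
// otherwise deferring to the backend's onCopySurface().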
bool GrGpu::copySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                        GrSurface* src, GrSurfaceOrigin srcOrigin,
                        const SkIRect& srcRect, const SkIPoint& dstPoint,
                        bool canDiscardOutsideDstRect) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrGpu", "copySurface", fContext);
    SkASSERT(dst && src);

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint,
                               canDiscardOutsideDstRect);
}

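// Reads back a rectangle of pixels into 'buffer'. The requested rect is first clipped to the
// surface bounds; reads from compressed configs are not supported.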
bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
                       GrColorType dstColorType, void* buffer, size_t rowBytes) {
    SkASSERT(surface);

    int bpp = GrColorTypeBytesPerPixel(dstColorType);
    if (!GrSurfacePriv::AdjustReadPixelParams(surface->width(), surface->height(), bpp,
                                              &left, &top, &width, &height,
                                              &buffer, &rowBytes)) {
        return false;
    }

    // Reading back from compressed configs is not supported.
    if (GrPixelConfigIsCompressed(surface->config())) {
        return false;
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, left, top, width, height, dstColorType, buffer, rowBytes);
}

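// Uploads one or more mip levels to 'surface'. A non-mipped write may target a subrect, but a
// mipped write must cover the entire surface and supply pixels for every level.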
bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height,
                        GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount) {
    SkASSERT(surface);

    if (surface->readOnly()) {
        return false;
    }

    if (1 == mipLevelCount) {
        // We require that if we are not mipped, then the write region is contained in the surface.
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height());
        if (!bounds.contains(subRect)) {
            return false;
        }
    } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (!texels[currentMipLevel].fPixels) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface, left, top, width, height, srcColorType, texels,
                            mipLevelCount)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
        return true;
    }
    return false;
}

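// Schedules an upload into 'texture' from the contents of 'transferBuffer' starting at 'offset'.
// Unlike writePixels(), the source data lives in a GPU transfer buffer rather than CPU memory.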
bool GrGpu::transferPixels(GrTexture* texture, int left, int top, int width, int height,
                           GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset,
                           size_t rowBytes) {
    SkASSERT(texture);
    SkASSERT(transferBuffer);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixels(texture, left, top, width, height, bufferColorType, transferBuffer,
                               offset, rowBytes)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();

        return true;
    }
    return false;
}

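// Rebuilds the dirty mip chain of 'texture' and marks it clean on success.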
bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    SkASSERT(texture);
    SkASSERT(this->caps()->mipMapSupport());
    SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes);
    SkASSERT(texture->texturePriv().mipMapsAreDirty());
    SkASSERT(!texture->asRenderTarget() || !texture->asRenderTarget()->needsResolve());
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->texturePriv().markMipMapsClean();
        return true;
    }
    return false;
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}

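// Bookkeeping after any GPU write: flags the render target as needing resolve (flipping the
// bounds for bottom-left origins) and dirties the mip chain when only the base level changed.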
void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain and resolve buffer as dirty if and only if the bounds are non-empty
    // (null bounds mean the entire surface was written).
    if (nullptr == bounds || !bounds->isEmpty()) {
        if (GrRenderTarget* target = surface->asRenderTarget()) {
            SkIRect flippedBounds;
            if (kBottomLeft_GrSurfaceOrigin == origin && bounds) {
                flippedBounds = {bounds->fLeft, surface->height() - bounds->fBottom,
                                 bounds->fRight, surface->height() - bounds->fTop};
                bounds = &flippedBounds;
            }
            target->flagAsNeedingResolve(bounds);
        }
        GrTexture* texture = surface->asTexture();
        if (texture && 1 == mipLevels) {
            texture->texturePriv().markMipMapsDirty();
        }
    }
}

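// Called at the end of a flush. When fence syncs are supported, creates or wraps a semaphore for
// each requested backend semaphore and inserts it so the flush signals it.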
GrSemaphoresSubmitted GrGpu::finishFlush(int numSemaphores,
                                         GrBackendSemaphore backendSemaphores[]) {
    this->stats()->incNumFinishFlushes();
    GrResourceProvider* resourceProvider = fContext->contextPriv().resourceProvider();

    if (this->caps()->fenceSyncSupport()) {
        for (int i = 0; i < numSemaphores; ++i) {
            sk_sp<GrSemaphore> semaphore;
            if (backendSemaphores[i].isInitialized()) {
                // The client supplied a semaphore; wrap it so the flush will signal it.
                semaphore = resourceProvider->wrapBackendSemaphore(
                        backendSemaphores[i], GrResourceProvider::SemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
            } else {
                semaphore = resourceProvider->makeSemaphore(false);
            }
            this->insertSemaphore(semaphore);

            if (!backendSemaphores[i].isInitialized()) {
                // Hand the newly created semaphore back to the client.
                backendSemaphores[i] = semaphore->backendSemaphore();
            }
        }
    }
    this->onFinishFlush((numSemaphores > 0 && this->caps()->fenceSyncSupport()));
    return this->caps()->fenceSyncSupport() ? GrSemaphoresSubmitted::kYes
                                            : GrSemaphoresSubmitted::kNo;
}

#ifdef SK_ENABLE_DUMP_GPU
void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS
GrBackendTexture GrGpu::createTestingOnlyBackendTexture(const void* pixels, int w, int h,
                                                        SkColorType colorType, bool isRenderTarget,
                                                        GrMipMapped isMipped, size_t rowBytes) {
    GrColorType grCT = SkColorTypeToGrColorType(colorType);

    return this->createTestingOnlyBackendTexture(pixels, w, h, grCT, isRenderTarget, isMipped,
                                                 rowBytes);
}
#endif