/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "GrCaps.h"
#include "GrGpuCommandBuffer.h"
#include "GrProgramDesc.h"
#include "GrSwizzle.h"
#include "GrAllocator.h"
#include "GrTextureProducer.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
#include "SkPath.h"
#include "SkTArray.h"
#include <map>

class GrBackendRenderTarget;
class GrBackendSemaphore;
class GrBuffer;
class GrContext;
struct GrContextOptions;
class GrGLContext;
class GrMesh;
class GrPath;
class GrPathRenderer;
class GrPathRendererChain;
class GrPathRendering;
class GrPipeline;
class GrPrimitiveProcessor;
class GrRenderTarget;
class GrSemaphore;
class GrStencilAttachment;
class GrStencilSettings;
class GrSurface;
class GrTexture;
class SkJSONWriter;

class GrGpu : public SkRefCnt {
public:
    GrGpu(GrContext* context);
    ~GrGpu() override;

    GrContext* getContext() { return fContext; }
    const GrContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    sk_sp<const GrCaps> refCaps() const { return fCaps; }

    GrPathRendering* pathRendering() { return fPathRendering.get(); }

    enum class DisconnectType {
        // No cleanup should be attempted; immediately cease making backend API calls.
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by GrContext when the underlying backend context is already destroyed or will be
    // destroyed before the GrContext.
    virtual void disconnect(DisconnectType);

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }

    /**
     * Creates a texture object. If kRenderTarget_GrSurfaceFlag is set in the desc, the texture
     * can be used as a render target by calling GrTexture::asRenderTarget(). Not all pixel
     * configs can be used as render targets. Support for configs as textures or render targets
     * can be checked using GrCaps.
     *
     * @param desc           describes the texture to be created.
     * @param budgeted       does this texture count against the resource cache budget?
     * @param texels         array of mipmap levels containing texel data to load.
     *                       Each level begins with full-size palette data for paletted textures.
     *                       It contains width*height texels. If there is only one element and
     *                       it contains nullptr fPixels, texture data is uninitialized.
     * @param mipLevelCount  the number of levels in 'texels'
     * @return  the texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(const GrSurfaceDesc&, SkBudgeted, const GrMipLevel texels[],
                                   int mipLevelCount);

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc, SkBudgeted);
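    /**
     * A minimal usage sketch for createTexture(), assuming a valid GrGpu* named 'gpu'; the
     * GrSurfaceDesc field values below are purely illustrative:
     *
     *   GrSurfaceDesc desc;
     *   desc.fFlags  = kRenderTarget_GrSurfaceFlag;   // also request render target usage
     *   desc.fWidth  = 256;
     *   desc.fHeight = 256;
     *   desc.fConfig = kRGBA_8888_GrPixelConfig;
     *   // No texel data is supplied, so the texture contents start out uninitialized.
     *   sk_sp<GrTexture> tex = gpu->createTexture(desc, SkBudgeted::kYes);
     *   if (!tex) {
     *       // Unsupported config or allocation failure; the caller must handle nullptr.
     *   }
     */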
    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
                                        GrIOType);

    /**
     * Implements GrResourceProvider::wrapRenderableBackendTexture
     */
    sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&, int sampleCnt,
                                                  GrWrapOwnership, GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);

    /**
     * Implements GrResourceProvider::wrapBackendTextureAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                           int sampleCnt);

    /**
     * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                              const GrVkDrawableInfo&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size           size of buffer to create.
     * @param intendedType   hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern  hint to the graphics subsystem about how the data will be accessed.
     * @param data           optional data with which to initialize the buffer.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    sk_sp<GrBuffer> createBuffer(size_t size, GrBufferType intendedType,
                                 GrAccessPattern accessPattern, const void* data = nullptr);

    /**
     * Resolves MSAA.
     */
    void resolveRenderTarget(GrRenderTarget*);

    /**
     * Uses the base level of the texture to recompute the contents of the other levels.
     */
    bool regenerateMipMapLevels(GrTexture*);

    /**
     * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
     *
     * @param surface       The surface to read from
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param dstColorType  the color type of the destination buffer.
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      the number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     * @param invertY       buffer should be populated bottom-to-top as opposed
     *                      to top-to-bottom (skia's usual order)
     *
     * @return true if the read succeeded, false if not. The read can fail
     *         because of an unsupported pixel config or because no render
     *         target is currently set.
     */
    bool readPixels(GrSurface* surface, int left, int top, int width, int height,
                    GrColorType dstColorType, void* buffer, size_t rowBytes);
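    /**
     * A minimal usage sketch for readPixels(), assuming 'gpu' and a 'surface' that is at least
     * 64x64 pixels; names and sizes are illustrative:
     *
     *   const int w = 64, h = 64;
     *   std::vector<uint32_t> pixels(w * h);          // tightly packed RGBA8888 destination
     *   bool ok = gpu->readPixels(surface, 0, 0, w, h,
     *                             GrColorType::kRGBA_8888, pixels.data(),
     *                             0);                 // rowBytes == 0 means tightly packed
     *   if (!ok) {
     *       // The read failed, e.g. an unsupported pixel config for this surface.
     *   }
     */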
    /**
     * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
     *
     * @param surface        The surface to write to.
     * @param left           left edge of the rectangle to write (inclusive)
     * @param top            top edge of the rectangle to write (inclusive)
     * @param width          width of rectangle to write in pixels.
     * @param height         height of rectangle to write in pixels.
     * @param srcColorType   the color type of the source buffer.
     * @param texels         array of mipmap levels containing texture data
     * @param mipLevelCount  number of levels in 'texels'
     */
    bool writePixels(GrSurface* surface, int left, int top, int width, int height,
                     GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount);

    /**
     * Helper for the case of a single level.
     */
    bool writePixels(GrSurface* surface, int left, int top, int width, int height,
                     GrColorType srcColorType, const void* buffer, size_t rowBytes) {
        // Wrap the single buffer in a one-element GrMipLevel and forward to the
        // multi-level entry point.
        GrMipLevel mipLevel = {buffer, rowBytes};
        return this->writePixels(surface, left, top, width, height, srcColorType, &mipLevel, 1);
    }

    /**
     * Updates the pixels in a rectangle of a texture using a buffer.
     *
     * There are a couple of assumptions here. First, we only update the top miplevel.
     * And second, that any y flip needed has already been done in the buffer.
     *
     * @param texture          The texture to write to.
     * @param left             left edge of the rectangle to write (inclusive)
     * @param top              top edge of the rectangle to write (inclusive)
     * @param width            width of rectangle to write in pixels.
     * @param height           height of rectangle to write in pixels.
     * @param bufferColorType  the color type of the transfer buffer's pixel data
     * @param transferBuffer   GrBuffer to read pixels from (type must be "kXferCpuToGpu")
     * @param offset           offset from the start of the buffer
     * @param rowBytes         number of bytes between consecutive rows in the buffer. Zero
     *                         means rows are tightly packed.
     */
    bool transferPixels(GrTexture* texture, int left, int top, int width, int height,
                        GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset,
                        size_t rowBytes);

    // After the client interacts directly with the 3D context state, the GrGpu must resync its
    // internal state and assumptions about the 3D context state. Each time this occurs the GrGpu
    // bumps a timestamp.
    // At 10 resets / frame and 60fps a 64-bit timestamp will overflow in about a billion years.
    typedef uint64_t ResetTimestamp;

    // This timestamp is always older than the current timestamp.
    static const ResetTimestamp kExpiredTimestamp = 0;

    // Returns a timestamp based on the number of times the context was reset. This timestamp
    // can be used to lazily detect when cached 3D context state is dirty.
    ResetTimestamp getResetTimestamp() const { return fResetTimestamp; }

    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to
    // the dst take place at the GrOpList level, and this function implements faster copy paths.
    // The rect and point are pre-clipped. The src rect and implied dst rect are guaranteed to be
    // within the src/dst bounds and non-empty. If canDiscardOutsideDstRect is set to true then we
    // don't need to preserve any data on the dst surface outside of the copy.
    bool copySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                     GrSurface* src, GrSurfaceOrigin srcOrigin,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint,
                     bool canDiscardOutsideDstRect = false);
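    /**
     * A minimal usage sketch for copySurface(), copying a 32x32 block from 'src' into 'dst' at
     * (8, 8); all names and values are illustrative:
     *
     *   SkIRect srcRect   = SkIRect::MakeWH(32, 32);
     *   SkIPoint dstPoint = SkIPoint::Make(8, 8);
     *   bool copied = gpu->copySurface(dst, kTopLeft_GrSurfaceOrigin,
     *                                  src, kTopLeft_GrSurfaceOrigin,
     *                                  srcRect, dstPoint);
     *   // A false return means no fast path exists; the GrOpList level then falls back to
     *   // drawing from src into dst.
     */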
    // Returns a GrGpuRTCommandBuffer which GrOpLists send draw commands to instead of directly
    // to the Gpu object. The 'bounds' rect is the content rect of the destination.
    virtual GrGpuRTCommandBuffer* getCommandBuffer(
            GrRenderTarget*, GrSurfaceOrigin, const SkRect& bounds,
            const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
            const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) = 0;

    // Returns a GrGpuTextureCommandBuffer which GrOpLists send texture commands to instead of
    // directly to the Gpu object.
    virtual GrGpuTextureCommandBuffer* getCommandBuffer(GrTexture*, GrSurfaceOrigin) = 0;

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
    // insert any numSemaphores semaphores on the gpu and set the backendSemaphores to match the
    // inserted semaphores.
    GrSemaphoresSubmitted finishFlush(int numSemaphores, GrBackendSemaphore backendSemaphores[]);

    virtual void submit(GrGpuCommandBuffer*) = 0;

    virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
    virtual bool waitFence(GrFence, uint64_t timeout = 1000) = 0;
    virtual void deleteFence(GrFence) const = 0;

    virtual sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) = 0;
    virtual sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                    GrResourceProvider::SemaphoreWrapType wrapType,
                                                    GrWrapOwnership ownership) = 0;
    virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
    virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;

    /**
     * Put this texture in a safe and known state for use across multiple GrContexts. Depending on
     * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     * semaphore before using this texture.
     */
    virtual sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
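    /**
     * A minimal sketch of the fence flow used to wait on previously submitted GPU work; the
     * structure is illustrative and the appropriate timeout value is backend-dependent:
     *
     *   GrFence fence = gpu->insertFence();
     *   // ... hand control back to the caller, record more work, etc. ...
     *   if (gpu->waitFence(fence)) {
     *       // All GPU work recorded before the fence has completed.
     *   }
     *   gpu->deleteFence(fence);
     */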
    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() { this->reset(); }

        void reset() {
            fRenderTargetBinds = 0;
            fShaderCompilations = 0;
            fTextureCreates = 0;
            fTextureUploads = 0;
            fTransfersToTexture = 0;
            fStencilAttachmentCreates = 0;
            fNumDraws = 0;
            fNumFailedDraws = 0;
            fNumFinishFlushes = 0;
        }

        int renderTargetBinds() const { return fRenderTargetBinds; }
        void incRenderTargetBinds() { fRenderTargetBinds++; }
        int shaderCompilations() const { return fShaderCompilations; }
        void incShaderCompilations() { fShaderCompilations++; }
        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }
        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }
        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
        void incNumDraws() { fNumDraws++; }
        void incNumFailedDraws() { ++fNumFailedDraws; }
        void incNumFinishFlushes() { ++fNumFinishFlushes; }
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
        int numDraws() const { return fNumDraws; }
        int numFailedDraws() const { return fNumFailedDraws; }
        int numFinishFlushes() const { return fNumFinishFlushes; }

    private:
        int fRenderTargetBinds;
        int fShaderCompilations;
        int fTextureCreates;
        int fTextureUploads;
        int fTransfersToTexture;
        int fStencilAttachmentCreates;
        int fNumDraws;
        int fNumFailedDraws;
        int fNumFinishFlushes;
#else
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
        void incRenderTargetBinds() {}
        void incShaderCompilations() {}
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incStencilAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumFinishFlushes() {}
#endif
    };

    Stats* stats() { return &fStats; }
    void dumpJSON(SkJSONWriter*) const;
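    /**
     * A minimal sketch of how these counters are consumed, assuming GR_GPU_STATS is enabled and
     * a valid GrGpu* named 'gpu' (backend code bumps the counters, tools read them back):
     *
     *   gpu->stats()->incNumDraws();          // typically called from a backend draw path
     *   SkString report;
     *   gpu->stats()->dump(&report);
     *   SkDebugf("%s\n", report.c_str());
     *   gpu->stats()->reset();
     */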
#if GR_TEST_UTILS
    GrBackendTexture createTestingOnlyBackendTexture(const void* pixels, int w, int h,
                                                     SkColorType, bool isRenderTarget,
                                                     GrMipMapped, size_t rowBytes = 0);

    /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
        only to be used for testing (particularly for testing the methods that import an
        externally created texture into Skia). Must be matched with a call to
        deleteTestingOnlyTexture(). */
    virtual GrBackendTexture createTestingOnlyBackendTexture(const void* pixels, int w, int h,
                                                             GrColorType, bool isRenderTarget,
                                                             GrMipMapped, size_t rowBytes = 0) = 0;

    /** Checks whether a handle represents an actual texture in the backend API that has not been
        freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Frees a texture created by createTestingOnlyBackendTexture(). If ownership of the backend
     * texture has been transferred to a GrContext using adopt semantics this should not be called.
     */
    virtual void deleteTestingOnlyBackendTexture(const GrBackendTexture&) = 0;

    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h,
                                                                       GrColorType) = 0;

    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Flushes all work to the gpu and forces the GPU to wait until all the gpu work has completed.
     * This is for testing purposes only.
     */
    virtual void testingOnly_flushGpuAndSync() = 0;
#endif

    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
    // the GrStencilAttachment.
    virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                        int width,
                                                                        int height) = 0;

    // Determines whether a texture will need to be rescaled in order to be used with the
    // GrSamplerState.
    static bool IsACopyNeededForRepeatWrapMode(const GrCaps*, GrTextureProxy* texProxy,
                                               int width, int height,
                                               GrSamplerState::Filter,
                                               GrTextureProducer::CopyParams*,
                                               SkScalar scaleAdjust[2]);

    // Determines whether a texture will need to be copied because the draw requires mips but the
    // texture doesn't have any. This call should only be checked if IsACopyNeededForRepeatWrapMode
    // fails. If that call succeeds, then a copy should be done using those params and the
    // mip mapping requirements will be handled there.
    static bool IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
                                     GrSamplerState::Filter filter,
                                     GrTextureProducer::CopyParams* copyParams);

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    /**
     * Returns a key that represents the sampler that will be created for the passed in parameters.
     * Currently this key is only used when we are building a vulkan pipeline with immutable
     * samplers. In that case, we need our cache key to also contain this key.
     *
     * A return value of 0 indicates that the program/pipeline we are creating is not affected by
     * the sampler.
     */
    virtual uint32_t getExtraSamplerKeyForProgram(const GrSamplerState&, const GrBackendFormat&) {
        return 0;
    }

    virtual void storeVkPipelineCacheData() {}

protected:
    // Handles cases where a surface will be updated without a call to flushRenderTarget.
    void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    Stats fStats;
    std::unique_ptr<GrPathRendering> fPathRendering;
    // Subclass must initialize this in its constructor.
    sk_sp<const GrCaps> fCaps;

    typedef SkTArray<SkPoint, true> SamplePattern;

private:
    // Called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) = 0;

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
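    // A sketch of the intended dirty-context pattern, with hypothetical call sites: a client that
    // touches the backend 3D API directly reports it, and backend subclasses call
    // handleDirtyContext() before issuing their own backend calls so cached state is rebuilt via
    // onResetContext():
    //
    //   // Client code, after making raw GL/Vulkan/etc. calls:
    //   gpu->markContextDirty();
    //
    //   // Backend subclass, at the top of an entry point that talks to the 3D API:
    //   this->handleDirtyContext();   // no-op unless fResetBits is non-zero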
    // overridden by backend-specific derived class to create objects.
    // Texture size and sample size will have already been validated in the base class before
    // onCreateTexture is called.
    virtual sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc&, SkBudgeted,
                                             const GrMipLevel texels[], int mipLevelCount) = 0;

    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership,
                                                  GrWrapCacheable, GrIOType) = 0;
    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&, int sampleCnt,
                                                            GrWrapOwnership, GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                                     int sampleCnt) = 0;
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    virtual sk_sp<GrBuffer> onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
                                           const void* data) = 0;

    // overridden by backend-specific derived class to perform the surface read
    virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height, GrColorType,
                              void* buffer, size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface write
    virtual bool onWritePixels(GrSurface*, int left, int top, int width, int height, GrColorType,
                               const GrMipLevel texels[], int mipLevelCount) = 0;

    // overridden by backend-specific derived class to perform the texture transfer
    virtual bool onTransferPixels(GrTexture*, int left, int top, int width, int height,
                                  GrColorType colorType, GrBuffer* transferBuffer, size_t offset,
                                  size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the resolve
    virtual void onResolveRenderTarget(GrRenderTarget* target) = 0;

    // overridden by backend-specific derived class to perform mip map level regeneration
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // overridden by backend-specific derived class to perform the copy surface
    virtual bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                               GrSurface* src, GrSurfaceOrigin srcOrigin,
                               const SkIRect& srcRect, const SkIPoint& dstPoint,
                               bool canDiscardOutsideDstRect) = 0;

    virtual void onFinishFlush(bool insertedSemaphores) = 0;

#ifdef SK_ENABLE_DUMP_GPU
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
        ++fResetTimestamp;
    }

    ResetTimestamp fResetTimestamp;
    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrContext* fContext;

    friend class GrPathRendering;
    typedef SkRefCnt INHERITED;
};

#endif