/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "GrCaps.h"
#include "GrColor.h"
#include "SkMatrix.h"
#include "SkPathEffect.h"
#include "SkTypes.h"
#include "../private/GrAuditTrail.h"
#include "../private/GrSingleOwner.h"
#include "GrContextOptions.h"

// NOTE(review): std::chrono and std::unique_ptr are used below without direct
// <chrono>/<memory> includes — presumably pulled in transitively (e.g. via
// SkTypes.h); confirm before reordering or pruning the include list.

class GrAtlasGlyphCache;
class GrBackendSemaphore;
class GrContextPriv;
class GrContextThreadSafeProxy;
class GrDrawingManager;
struct GrDrawOpAtlasConfig;
class GrFragmentProcessor;
struct GrGLInterface;
class GrGpu;
class GrIndexBuffer;
struct GrMockOptions;
class GrOvalRenderer;
class GrPath;
class GrProxyProvider;
class GrRenderTargetContext;
class GrResourceEntry;
class GrResourceCache;
class GrResourceProvider;
class GrSamplerState;
class GrSurfaceProxy;
class GrTextBlobCache;
class GrTextContext;
class GrTextureProxy;
class GrVertexBuffer;
struct GrVkBackendContext;
class GrSwizzle;
class SkTraceMemoryDump;

class SkImage;
class SkSurfaceProps;
class SkTaskGroup;

class SK_API GrContext : public SkRefCnt {
public:
    /**
     * Creates a GrContext for a backend context.
     */
    static GrContext* Create(GrBackend, GrBackendContext, const GrContextOptions& options);
    static GrContext* Create(GrBackend, GrBackendContext);

    // Factory for a GL-backed context; the no-options overloads use defaults.
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>);
    // Deprecated
    static sk_sp<GrContext> MakeGL(const GrGLInterface*);
    static sk_sp<GrContext> MakeGL(const GrGLInterface*, const GrContextOptions&);

#ifdef SK_VULKAN
    // Factory for a Vulkan-backed context.
    static sk_sp<GrContext> MakeVulkan(sk_sp<const GrVkBackendContext>, const GrContextOptions&);
    static sk_sp<GrContext> MakeVulkan(sk_sp<const GrVkBackendContext>);
#endif

#ifdef SK_METAL
    /**
     * Makes a GrContext which uses Metal as the backend. The device parameter is an MTLDevice
     * and queue is an MTLCommandQueue which should be used by the backend. These objects must
     * have a ref on them which can be transferred to Ganesh which will release the ref when the
     * GrContext is destroyed.
     */
    static sk_sp<GrContext> MakeMetal(void* device, void* queue, const GrContextOptions& options);
    static sk_sp<GrContext> MakeMetal(void* device, void* queue);
#endif

    // Factory for a mock (no real 3D API) context, used for testing.
    static sk_sp<GrContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrContext> MakeMock(const GrMockOptions*);

    virtual ~GrContext();

    // Returns a proxy object that can be used on other threads (see
    // GrContextThreadSafeProxy at the bottom of this file).
    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * The GrContext normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend. Shouldn't
     * be called frequently for good performance.
     * The flag bits, state, is dependent on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * Callback function to allow classes to cleanup on GrContext destruction.
     * The 'info' field is filled in with the 'info' passed to addCleanUp.
     */
    typedef void (*PFCleanUpFunc)(const GrContext* context, void* info);

    /**
     * Add a function to be called from within GrContext's destructor.
     * This gives classes a chance to free resources held on a per context basis.
     * The 'info' parameter will be stored and passed to the callback function.
     */
    void addCleanUp(PFCleanUpFunc cleanUp, void* info) {
        CleanUpData* entry = fCleanUpData.push();

        entry->fFunc = cleanUp;
        entry->fInfo = info;
    }

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
     * GrContext and any of its created resource objects will not make backend 3D API calls. Content
     * rendered but not previously flushed may be lost. After this function is called all subsequent
     * calls on the GrContext will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and further
     * API calls may crash.
     */
    void abandonContext();

    /**
     * This is similar to abandonContext() however the underlying 3D context is not yet lost and
     * the GrContext will cleanup all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that GrContext will be destroyed first (perhaps because it may be ref'ed
     * elsewhere by either the client or Skia objects).
     */
    void releaseResourcesAndAbandonContext();

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     * Return the current GPU resource cache limits.
     *
     * @param maxResources If non-null, returns maximum number of resources that
     *                     can be held in the cache.
     * @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                         video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     * Gets the current GPU resource cache usage.
     *
     * @param resourceCount If non-null, returns the number of resources that are held in the
     *                      cache.
     * @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                      in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /**
     * Specify the GPU resource cache limits. If the current cache exceeds either
     * of these, it will be purged (LRU) to keep the cache within these limits.
     *
     * @param maxResources The maximum number of resources that can be held in
     *                     the cache.
     * @param maxResourceBytes The maximum number of bytes of video memory
     *                         that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    void freeGpuResources();

    /**
     * Purge all the unlocked resources from the cache.
     * This entry point is mainly meant for timing texture uploads
     * and is not defined in normal builds of Skia.
     */
    void purgeAllUnlockedResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached
     * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
     * resource types.
     *
     * @param bytesToPurge the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);

    /** Access the context capabilities */
    const GrCaps* caps() const { return fCaps.get(); }

    /*
     * Create a new render target context backed by a deferred-style
     * GrRenderTargetProxy. We guarantee that "asTextureProxy" will succeed for
     * renderTargetContexts created via this entry point.
     */
    sk_sp<GrRenderTargetContext> makeDeferredRenderTargetContext(
                                            SkBackingFit fit,
                                            int width, int height,
                                            GrPixelConfig config,
                                            sk_sp<SkColorSpace> colorSpace,
                                            int sampleCnt = 1,
                                            GrMipMapped = GrMipMapped::kNo,
                                            GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
                                            const SkSurfaceProps* surfaceProps = nullptr,
                                            SkBudgeted = SkBudgeted::kYes);
    /*
     * This method will attempt to create a renderTargetContext that has, at least, the number of
     * channels and precision per channel as requested in 'config' (e.g., A8 and 888 can be
     * converted to 8888). It may also swizzle the channels (e.g., BGRA -> RGBA).
     * SRGB-ness will be preserved.
     */
    sk_sp<GrRenderTargetContext> makeDeferredRenderTargetContextWithFallback(
                                            SkBackingFit fit,
                                            int width, int height,
                                            GrPixelConfig config,
                                            sk_sp<SkColorSpace> colorSpace,
                                            int sampleCnt = 1,
                                            GrMipMapped = GrMipMapped::kNo,
                                            GrSurfaceOrigin origin = kBottomLeft_GrSurfaceOrigin,
                                            const SkSurfaceProps* surfaceProps = nullptr,
                                            SkBudgeted budgeted = SkBudgeted::kYes);

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API.
     */
    void flush();

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API. After
     * issuing all commands, numSemaphore semaphores will be signaled by the gpu. The client passes
     * in an array of numSemaphores GrBackendSemaphores. In general these GrBackendSemaphore's can
     * be either initialized or not. If they are initialized, the backend uses the passed in
     * semaphore. If it is not initialized, a new semaphore is created and the GrBackendSemaphore
     * object is initialized with that semaphore.
     *
     * The client will own and be responsible for deleting the underlying semaphores that are stored
     * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
     * themselves can be deleted as soon as this function returns.
     *
     * If the backend API is OpenGL only uninitialized GrBackendSemaphores are supported.
     * If the backend API is Vulkan either initialized or uninitialized semaphores are supported.
     * If uninitialized, the semaphores which are created will be valid for use only with the
     * VkDevice with which they were created.
     *
     * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
     * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
     * any of the semaphores. However, any pending commands to the context will still be flushed.
     */
    GrSemaphoresSubmitted flushAndSignalSemaphores(int numSemaphores,
                                                   GrBackendSemaphore signalSemaphores[]);

    /**
     * An ID associated with this context, guaranteed to be unique.
     */
    uint32_t uniqueID() { return fUniqueID; }

    ///////////////////////////////////////////////////////////////////////////
    // Functions intended for internal use only.
    bool abandoned() const;

    /** Reset GPU stats */
    void resetGpuStats() const;

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void dumpCacheStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void dumpGpuStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printGpuStats() const;

    /** Returns a string with detailed information about the context & GPU, in JSON format.
     */
    SkString dump() const;

    /** Specify the TextBlob cache limit. If the current cache exceeds this limit it will purge.
        this is for testing only */
    void setTextBlobCacheLimit_ForTesting(size_t bytes);

    /** Specify the sizes of the GrAtlasTextContext atlases. The configs pointer below should be
        to an array of 3 entries */
    void setTextContextAtlasSizes_ForTesting(const GrDrawOpAtlasConfig* configs);

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /** Get pointer to atlas texture for given mask format. Note that this wraps an
        actively mutating texture in an SkImage. This could yield unexpected results
        if it gets cached or used more generally. */
    sk_sp<SkImage> getFontAtlasImage_ForTesting(GrMaskFormat format, uint32_t index = 0);

    GrAuditTrail* getAuditTrail() { return &fAuditTrail; }

    GrContextOptions::PersistentCache* getPersistentCache() { return fPersistentCache; }

    /** This is only useful for debug purposes */
    SkDEBUGCODE(GrSingleOwner* debugSingleOwner() const { return &fSingleOwner; } )

    // Provides access to functions that aren't part of the public API.
    GrContextPriv contextPriv();
    const GrContextPriv contextPriv() const;

protected:
    GrContext(GrContextThreadSafeProxy*);
    GrContext(GrBackend);

private:
    sk_sp<GrGpu>                            fGpu;
    sk_sp<const GrCaps>                     fCaps;
    // Raw pointers below are non-owning views; ownership/lifetime is managed
    // elsewhere in the context's init/teardown (not visible in this header).
    GrResourceCache*                        fResourceCache;
    GrResourceProvider*                     fResourceProvider;
    GrProxyProvider*                        fProxyProvider;

    sk_sp<GrContextThreadSafeProxy>         fThreadSafeProxy;

    GrAtlasGlyphCache*                      fAtlasGlyphCache;
    std::unique_ptr<GrTextBlobCache>        fTextBlobCache;

    bool                                    fDisableGpuYUVConversion;
    bool                                    fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool                                    fPMUPMConversionsRoundTrip;

    // In debug builds we guard against improper thread handling
    // This guard is passed to the GrDrawingManager and, from there to all the
    // GrRenderTargetContexts.  It is also passed to the GrResourceProvider and SkGpuDevice.
    mutable GrSingleOwner                   fSingleOwner;

    std::unique_ptr<SkTaskGroup>            fTaskGroup;

    // A registered cleanup callback plus the opaque pointer handed back to it;
    // see addCleanUp().
    struct CleanUpData {
        PFCleanUpFunc fFunc;
        void*         fInfo;
    };

    SkTDArray<CleanUpData>                  fCleanUpData;

    const uint32_t                          fUniqueID;

    std::unique_ptr<GrDrawingManager>       fDrawingManager;

    GrAuditTrail                            fAuditTrail;

    const GrBackend                         fBackend;

    GrContextOptions::PersistentCache*      fPersistentCache;

    // TODO: have the GrClipStackClip use renderTargetContexts and rm this friending
    friend class GrContextPriv;

    bool init(const GrContextOptions&); // init must be called after either constructor.

    /**
     * These functions create premul <-> unpremul effects. If the second argument is 'true', they
     * use the specialized round-trip effects from GrConfigConversionEffect, otherwise they
     * create effects that do naive multiply or divide.
     */
    std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>,
                                                             bool useConfigConversionEffect);
    std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>,
                                                             bool useConfigConversionEffect);

    /**
     * Returns true if createPMtoUPMEffect and createUPMToPMEffect will succeed for non-sRGB 8888
     * configs. In other words, did we find a pair of round-trip preserving conversion effects?
     */
    bool validPMUPMConversionExists();

    /**
     * A callback similar to the above for use by the TextBlobCache
     * TODO move textblob draw calls below context so we can use the call above.
     */
    static void TextBlobCacheOverBudgetCB(void* data);

    typedef SkRefCnt INHERITED;
};

/**
 * Can be used to perform actions related to the generating GrContext in a thread safe manner. The
 * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext.
 */
class GrContextThreadSafeProxy : public SkRefCnt {
public:
    // True iff 'context' is the GrContext this proxy was created from,
    // matched by unique ID.
    bool matches(GrContext* context) const { return context->uniqueID() == fContextUniqueID; }

private:
    // DDL TODO: need to add unit tests for backend & maybe options
    GrContextThreadSafeProxy(sk_sp<const GrCaps> caps,
                             uint32_t uniqueID,
                             GrBackend backend,
                             const GrContextOptions& options)
        : fCaps(std::move(caps))
        , fContextUniqueID(uniqueID)
        , fBackend(backend)
        , fOptions(options) {
    }

    sk_sp<const GrCaps>      fCaps;
    const uint32_t           fContextUniqueID;
    const GrBackend          fBackend;
    const GrContextOptions   fOptions;

    friend class GrContext;
    friend class GrContextPriv;
    friend class SkImage;

    typedef SkRefCnt INHERITED;
};

#endif