/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "SkMatrix.h"
#include "SkPathEffect.h"
#include "SkTypes.h"
#include "../private/GrAuditTrail.h"
#include "../private/GrRecordingContext.h"
#include "../private/GrSingleOwner.h"
#include "GrContextOptions.h"

// We shouldn't need this, but currently Android is relying on this being included transitively.
#include "SkUnPreMultiply.h"

class GrAtlasManager;
class GrBackendFormat;
class GrBackendSemaphore;
class GrCaps;
class GrContextPriv;
class GrContextThreadSafeProxy;
class GrDrawingManager;
class GrFragmentProcessor;
struct GrGLInterface;
class GrStrikeCache;
class GrGpu;
struct GrMockOptions;
class GrOpMemoryPool;
class GrPath;
class GrProxyProvider;
class GrRenderTargetContext;
class GrResourceCache;
class GrResourceProvider;
class GrSamplerState;
class GrSkSLFPFactoryCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextBlobCache;
class GrTextContext;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

class SK_API GrContext : public GrRecordingContext {
public:
    /**
     * Creates a GrContext for a backend context. If no GrGLInterface is provided then the result
     * of GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrContext> MakeGL();

    static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&);

#ifdef SK_METAL
    /**
     * Makes a GrContext which uses Metal as the backend. The device parameter is an MTLDevice
     * and queue is an MTLCommandQueue which should be used by the backend. These objects must
     * have a ref on them that can be transferred to Ganesh, which will release the ref when the
     * GrContext is destroyed.
     */
    static sk_sp<GrContext> MakeMetal(void* device, void* queue, const GrContextOptions& options);
    static sk_sp<GrContext> MakeMetal(void* device, void* queue);
#endif

    static sk_sp<GrContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrContext> MakeMock(const GrMockOptions*);

    virtual ~GrContext();

    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * The GrContext normally assumes that no outsider is setting state within the underlying 3D
     * API's context/device/whatever. This call informs the context that the state was modified
     * and it should resend. Shouldn't be called frequently for good performance. The flag bits,
     * state, are dependent on which backend is used by the context, either GL or D3D (possible
     * in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of
     * the GrContext and any of its created resource objects will not make backend 3D API calls.
     * Content rendered but not previously flushed may be lost. After this function is called all
     * subsequent calls on the GrContext will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and
     * further API calls may crash.
     */
    virtual void abandonContext();

    /**
     * Returns true if the context was abandoned.
     */
    bool abandoned() const;

    /**
     * This is similar to abandonContext(); however, the underlying 3D context is not yet lost
     * and the GrContext will clean up all allocated resources before returning. After returning
     * it will assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D
     * context but can't guarantee that GrContext will be destroyed first (perhaps because it may
     * be ref'ed elsewhere by either the client or Skia objects).
     */
    virtual void releaseResourcesAndAbandonContext();
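    /*
     * Example (an illustrative sketch, not part of the original header): creating a GL-backed
     * GrContext and abandoning it once the underlying GL context has been lost. The variable
     * names are assumptions made for illustration; GrGLMakeNativeInterface() is the helper
     * referenced in the MakeGL() comment above.
     *
     *     sk_sp<const GrGLInterface> glInterface = GrGLMakeNativeInterface();
     *     sk_sp<GrContext> context = GrContext::MakeGL(std::move(glInterface));
     *     if (context) {
     *         // ... issue Skia draws backed by this context ...
     *
     *         // If the underlying GL context is lost (e.g. the EGL context goes away),
     *         // stop all further backend 3D API calls:
     *         context->abandonContext();
     *         SkASSERT(context->abandoned());
     *     }
     */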
    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     * Return the current GPU resource cache limits.
     *
     * @param maxResources      If non-null, returns maximum number of resources that can be held
     *                          in the cache.
     * @param maxResourceBytes  If non-null, returns maximum number of bytes of video memory that
     *                          can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     * Gets the current GPU resource cache usage.
     *
     * @param resourceCount  If non-null, returns the number of resources that are held in the
     *                       cache.
     * @param resourceBytes  If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /**
     * Specify the GPU resource cache limits. If the current cache exceeds either of these, it
     * will be purged (LRU) to keep the cache within these limits.
     *
     * @param maxResources      The maximum number of resources that can be held in the cache.
     * @param maxResourceBytes  The maximum number of bytes of video memory that can be held in
     *                          the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory pressure.
     */
    virtual void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }
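    /*
     * Example (an illustrative sketch, not part of the original header): inspecting the resource
     * cache and trimming idle resources, assuming 'context' is a valid sk_sp<GrContext>. The
     * halved byte budget and the five-second idle window are arbitrary values for illustration.
     *
     *     int maxResources;
     *     size_t maxResourceBytes;
     *     context->getResourceCacheLimits(&maxResources, &maxResourceBytes);
     *
     *     // Tighten the byte budget while leaving the resource-count limit unchanged.
     *     context->setResourceCacheLimits(maxResources, maxResourceBytes / 2);
     *
     *     // Drop resources that have not been used in the last five seconds.
     *     context->performDeferredCleanup(std::chrono::milliseconds(5000));
     */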
    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached or
     * we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging
     * other resource types.
     *
     * @param bytesToPurge            the desired number of bytes to be purged.
     * @param preferScratchResources  If true, scratch resources will be purged prior to other
     *                                resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly  If true, only unlocked scratch resources will be purged prior
     *                              to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);

    /**
     * Gets the maximum supported texture size.
     */
    int maxTextureSize() const;

    /**
     * Gets the maximum supported render target size.
     */
    int maxRenderTargetSize() const;

    /**
     * Can a SkImage be created with the given color type.
     */
    bool colorTypeSupportedAsImage(SkColorType) const;

    /**
     * Can a SkSurface be created with the given color type. To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    bool colorTypeSupportedAsSurface(SkColorType colorType) const {
        return this->maxSurfaceSampleCountForColorType(colorType) > 0;
    }

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    int maxSurfaceSampleCountForColorType(SkColorType) const;
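    /*
     * Example (an illustrative sketch, not part of the original header): choosing a sample count
     * for an RGBA 8888 surface, assuming 'context' is a valid sk_sp<GrContext>. The 4x cap is an
     * arbitrary value for illustration.
     *
     *     if (context->colorTypeSupportedAsSurface(kRGBA_8888_SkColorType)) {
     *         int samples = context->maxSurfaceSampleCountForColorType(kRGBA_8888_SkColorType);
     *         // Use up to 4x MSAA when available; 1 means only non-MSAA rendering is supported.
     *         samples = (samples > 4) ? 4 : samples;
     *     }
     */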
    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API.
     */
    void flush();

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API. After
     * issuing all commands, numSemaphores semaphores will be signaled by the gpu. The client
     * passes in an array of numSemaphores GrBackendSemaphores. In general these
     * GrBackendSemaphores can be either initialized or not. If they are initialized, the backend
     * uses the passed in semaphore. If it is not initialized, a new semaphore is created and the
     * GrBackendSemaphore object is initialized with that semaphore.
     *
     * The client will own and be responsible for deleting the underlying semaphores that are
     * stored and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore
     * objects themselves can be deleted as soon as this function returns.
     *
     * If the backend API is OpenGL only uninitialized GrBackendSemaphores are supported.
     * If the backend API is Vulkan either initialized or uninitialized semaphores are supported.
     * If uninitialized, the semaphores which are created will be valid for use only with the
     * VkDevice with which they were created.
     *
     * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
     * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
     * any of the semaphores. However, any pending commands to the context will still be flushed.
     */
    GrSemaphoresSubmitted flushAndSignalSemaphores(int numSemaphores,
                                                   GrBackendSemaphore signalSemaphores[]);
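    /*
     * Example (an illustrative sketch, not part of the original header): flushing and asking the
     * backend to signal a single semaphore that other GPU work can wait on, assuming 'context'
     * is a valid sk_sp<GrContext>. Passing an uninitialized GrBackendSemaphore asks Skia to
     * create one, as described above.
     *
     *     GrBackendSemaphore semaphore;  // uninitialized; the backend creates the semaphore
     *     GrSemaphoresSubmitted submitted = context->flushAndSignalSemaphores(1, &semaphore);
     *     if (GrSemaphoresSubmitted::kNo == submitted) {
     *         // No semaphore was created or signaled; do not make the GPU wait on it.
     *     }
     */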
    // Provides access to functions that aren't part of the public API.
    GrContextPriv contextPriv();
    const GrContextPriv contextPriv() const;

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

protected:
    GrContext(GrBackendApi, int32_t id = SK_InvalidGenID);

    bool initCommon(const GrContextOptions&);
    virtual bool init(const GrContextOptions&) = 0; // must be called after the ctor!

    virtual GrAtlasManager* onGetAtlasManager() = 0;

    sk_sp<const GrCaps>                     fCaps;
    sk_sp<GrContextThreadSafeProxy>         fThreadSafeProxy;
    sk_sp<GrSkSLFPFactoryCache>             fFPFactoryCache;

private:
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup>            fTaskGroup;
    sk_sp<GrGpu>                            fGpu;
    GrResourceCache*                        fResourceCache;
    GrResourceProvider*                     fResourceProvider;
    GrProxyProvider*                        fProxyProvider;

    // All the GrOp-derived classes use this pool.
    sk_sp<GrOpMemoryPool>                   fOpMemoryPool;

    GrStrikeCache*                          fGlyphCache;
    std::unique_ptr<GrTextBlobCache>        fTextBlobCache;

    bool                                    fDisableGpuYUVConversion;
    bool                                    fSharpenMipmappedTextures;
    bool                                    fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool                                    fPMUPMConversionsRoundTrip;

    // In debug builds we guard against improper thread handling.
    // This guard is passed to the GrDrawingManager and, from there, to all the
    // GrRenderTargetContexts. It is also passed to the GrResourceProvider and SkGpuDevice.
    mutable GrSingleOwner                   fSingleOwner;

    std::unique_ptr<GrDrawingManager>       fDrawingManager;

    GrAuditTrail                            fAuditTrail;

    GrContextOptions::PersistentCache*      fPersistentCache;

    // TODO: have the GrClipStackClip use renderTargetContexts and rm this friending
    friend class GrContextPriv;

    /**
     * These functions create premul <-> unpremul effects, using the specialized round-trip
     * effects from GrConfigConversionEffect.
     */
    std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
    std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns true if createPMToUPMEffect and createUPMToPMEffect will succeed. In other words,
     * did we find a pair of round-trip preserving conversion effects?
     */
    bool validPMUPMConversionExists();

    /**
     * A callback similar to the above for use by the TextBlobCache
     * TODO move textblob draw calls below context so we can use the call above.
     */
    static void TextBlobCacheOverBudgetCB(void* data);

    typedef GrRecordingContext INHERITED;
};

#endif