/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawOpAtlas.h"

#include <memory>

#include "include/private/SkTPin.h"
#include "src/core/SkOpts.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrResourceProviderPriv.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTracing.h"

#ifdef DUMP_ATLAS_DATA
static bool gDumpAtlasData = false;
#endif

#ifdef SK_DEBUG
void GrDrawOpAtlas::validate(const AtlasLocator& atlasLocator) const {
    // Verify that the plotIndex stored in the PlotLocator is consistent with the glyph rectangle
    int numPlotsX = fTextureWidth / fPlotWidth;
    int numPlotsY = fTextureHeight / fPlotHeight;

    int plotIndex = atlasLocator.plotIndex();
    auto topLeft = atlasLocator.topLeft();
    int plotX = topLeft.x() / fPlotWidth;
    int plotY = topLeft.y() / fPlotHeight;
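    // Plot indices count from the lower-right corner of the texture. As a worked example
    // (assuming, say, a 512x512 atlas with 256x256 plots, so numPlotsX == numPlotsY == 2),
    // the plot at (plotX, plotY) == (0, 0) has index 3 and the plot at (1, 1) has index 0.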
    SkASSERT(plotIndex == (numPlotsY - plotY - 1) * numPlotsX + (numPlotsX - plotX - 1));
}
#endif

// When proxy allocation is deferred until flush time the proxies acting as atlases require
// special handling. This is because the usage that can be determined from the ops themselves
// isn't sufficient. Independent of the ops there will be ASAP and inline uploads to the
// atlases. Extending the usage interval of any op that uses an atlas to the start of the
// flush (as is done for proxies that are used for sw-generated masks) also won't work because
// the atlas persists even beyond the last use in an op - for a given flush. Given this, atlases
// must explicitly manage the lifetime of their backing proxies via the onFlushCallback system
// (which calls this method).
void GrDrawOpAtlas::instantiate(GrOnFlushResourceProvider* onFlushResourceProvider) {
    for (uint32_t i = 0; i < fNumActivePages; ++i) {
        // All the atlas pages are now instantiated at flush time in the activateNewPage method.
        SkASSERT(fViews[i].proxy() && fViews[i].proxy()->isInstantiated());
    }
}

std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrProxyProvider* proxyProvider,
                                                   const GrBackendFormat& format,
                                                   GrColorType colorType, int width,
                                                   int height, int plotWidth, int plotHeight,
                                                   GenerationCounter* generationCounter,
                                                   AllowMultitexturing allowMultitexturing,
                                                   EvictionCallback* evictor) {
    if (!format.isValid()) {
        return nullptr;
    }

    std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(proxyProvider, format, colorType,
                                                           width, height, plotWidth, plotHeight,
                                                           generationCounter,
                                                           allowMultitexturing));
    if (!atlas->getViews()[0].proxy()) {
        return nullptr;
    }

    if (evictor != nullptr) {
        atlas->fEvictionCallbacks.emplace_back(evictor);
    }
    return atlas;
}

////////////////////////////////////////////////////////////////////////////////
GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, GenerationCounter* generationCounter,
                          int offX, int offY, int width, int height, GrColorType colorType)
        : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
        , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fPageIndex(pageIndex)
        , fPlotIndex(plotIndex)
        , fGenerationCounter(generationCounter)
        , fGenID(fGenerationCounter->next())
        , fPlotLocator(fPageIndex, fPlotIndex, fGenID)
        , fData(nullptr)
        , fWidth(width)
        , fHeight(height)
        , fX(offX)
        , fY(offY)
        , fRectanizer(width, height)
        , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
        , fColorType(colorType)
        , fBytesPerPixel(GrColorTypeBytesPerPixel(colorType))
#ifdef SK_DEBUG
        , fDirty(false)
#endif
{
    // We expect the allocated row bytes (width * bytes-per-pixel) to be a multiple of 4
    SkASSERT(((width*fBytesPerPixel) & 0x3) == 0);
    // The padding for faster uploads only works for 1, 2 and 4 byte texels
    SkASSERT(fBytesPerPixel != 3 && fBytesPerPixel <= 4);
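    // For example, an A8 plot (1 byte per texel) needs a width that is a multiple of 4,
    // while an RGBA plot (4 bytes per texel) satisfies the alignment for any width.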
    fDirtyRect.setEmpty();
}

GrDrawOpAtlas::Plot::~Plot() {
    sk_free(fData);
}

bool GrDrawOpAtlas::Plot::addSubImage(
        int width, int height, const void* image, AtlasLocator* atlasLocator) {
    SkASSERT(width <= fWidth && height <= fHeight);

    SkIPoint16 loc;
    if (!fRectanizer.addRect(width, height, &loc)) {
        return false;
    }

    GrIRect16 rect = GrIRect16::MakeXYWH(loc.fX, loc.fY, width, height);

    if (!fData) {
        fData = reinterpret_cast<unsigned char*>(
                sk_calloc_throw(fBytesPerPixel * fWidth * fHeight));
    }
    size_t rowBytes = width * fBytesPerPixel;
    const unsigned char* imagePtr = (const unsigned char*)image;
    // point ourselves at the right starting spot
    unsigned char* dataPtr = fData;
    dataPtr += fBytesPerPixel * fWidth * rect.fTop;
    dataPtr += fBytesPerPixel * rect.fLeft;
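    // dataPtr now addresses the texel at (rect.fLeft, rect.fTop) in this plot's CPU-side
    // backing store. Note the backing store rows are fWidth texels wide, while the incoming
    // image rows are only 'width' texels wide, hence the different strides in the loops below.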
    // copy into the data buffer, swizzling as we go if this is ARGB data
    constexpr bool kBGRAIsNative = kN32_SkColorType == kBGRA_8888_SkColorType;
    if (4 == fBytesPerPixel && kBGRAIsNative) {
        for (int i = 0; i < height; ++i) {
            SkOpts::RGBA_to_BGRA((uint32_t*)dataPtr, (const uint32_t*)imagePtr, width);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    } else {
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, rowBytes);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    }

    fDirtyRect.join({rect.fLeft, rect.fTop, rect.fRight, rect.fBottom});

    rect.offset(fOffset.fX, fOffset.fY);
    atlasLocator->updateRect(rect);
    SkDEBUGCODE(fDirty = true;)

    return true;
}

void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
                                          GrTextureProxy* proxy) {
    // We should only be issuing uploads if we are in fact dirty
    SkASSERT(fDirty && fData && proxy && proxy->peekTexture());
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    size_t rowBytes = fBytesPerPixel * fWidth;
    const unsigned char* dataPtr = fData;
    // Expand the dirty rect out to 4-byte aligned boundaries
    unsigned int clearBits = 0x3 / fBytesPerPixel;
    fDirtyRect.fLeft &= ~clearBits;
    fDirtyRect.fRight += clearBits;
    fDirtyRect.fRight &= ~clearBits;
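    // E.g. for an A8 plot (1 byte per texel) clearBits is 3, so a dirty span of [5, 14)
    // widens to [4, 16); for a 4-byte-per-texel plot clearBits is 0 and the rect is unchanged.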
    SkASSERT(fDirtyRect.fRight <= fWidth);
    // Set up dataPtr
    dataPtr += rowBytes * fDirtyRect.fTop;
    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;

    writePixels(proxy,
                fDirtyRect.makeOffset(fOffset.fX, fOffset.fY),
                fColorType,
                dataPtr,
                rowBytes);
    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

void GrDrawOpAtlas::Plot::resetRects() {
    fRectanizer.reset();

    fGenID = fGenerationCounter->next();
    fPlotLocator = PlotLocator(fPageIndex, fPlotIndex, fGenID);
    fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
    fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();

    // zero out the plot
    if (fData) {
        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
    }

    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

///////////////////////////////////////////////////////////////////////////////

GrDrawOpAtlas::GrDrawOpAtlas(GrProxyProvider* proxyProvider, const GrBackendFormat& format,
                             GrColorType colorType, int width, int height,
                             int plotWidth, int plotHeight, GenerationCounter* generationCounter,
                             AllowMultitexturing allowMultitexturing)
        : fFormat(format)
        , fColorType(colorType)
        , fTextureWidth(width)
        , fTextureHeight(height)
        , fPlotWidth(plotWidth)
        , fPlotHeight(plotHeight)
        , fGenerationCounter(generationCounter)
        , fAtlasGeneration(fGenerationCounter->next())
        , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? kMaxMultitexturePages : 1)
        , fNumActivePages(0) {
    int numPlotsX = width/plotWidth;
    int numPlotsY = height/plotHeight;
    SkASSERT(numPlotsX * numPlotsY <= GrDrawOpAtlas::kMaxPlots);
    SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
    SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);

    fNumPlots = numPlotsX * numPlotsY;

    this->createPages(proxyProvider, generationCounter);
}

inline void GrDrawOpAtlas::processEviction(PlotLocator plotLocator) {
    for (EvictionCallback* evictor : fEvictionCallbacks) {
        evictor->evict(plotLocator);
    }

    fAtlasGeneration = fGenerationCounter->next();
}

inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target,
                                      AtlasLocator* atlasLocator, Plot* plot) {
    int pageIdx = plot->pageIndex();
    this->makeMRU(plot, pageIdx);

    // If our most recent upload has already occurred then we have to insert a new
    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
    // This new update will piggyback on that previously scheduled update.
    if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
        // With C++14 we could move the sk_sp into the lambda to only ref once.
        sk_sp<Plot> plotsp(SkRef(plot));

        GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
        SkASSERT(proxy && proxy->isInstantiated()); // This is occurring at flush time

        GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
                [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                    plotsp->uploadToTexture(writePixels, proxy);
                });
        plot->setLastUploadToken(lastUploadToken);
    }
    atlasLocator->updatePlotLocator(plot->plotLocator());
    SkDEBUGCODE(this->validate(*atlasLocator);)
    return true;
}

bool GrDrawOpAtlas::uploadToPage(unsigned int pageIdx, GrDeferredUploadTarget* target, int width,
                                 int height, const void* image, AtlasLocator* atlasLocator) {
    SkASSERT(fViews[pageIdx].proxy() && fViews[pageIdx].proxy()->isInstantiated());

    // Look through all allocated plots for one we can share, in most-recently-used order
    PlotList::Iter plotIter;
    plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);

    for (Plot* plot = plotIter.get(); plot; plot = plotIter.next()) {
        SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
                 plot->bpp());

        if (plot->addSubImage(width, height, image, atlasLocator)) {
            return this->updatePlot(target, atlasLocator, plot);
        }
    }

    return false;
}

// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kPlotRecentlyUsedCount = 32;
static constexpr auto kAtlasRecentlyUsedCount = 128;
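// With these values, a plot becomes a candidate for reuse after it has gone unused for more
// than 32 atlas-related flushes, and an otherwise idle atlas is still considered for
// compaction once more than 128 flushes pass without any use at all (see compact() below).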

GrDrawOpAtlas::ErrorCode GrDrawOpAtlas::addToAtlas(GrResourceProvider* resourceProvider,
                                                   GrDeferredUploadTarget* target,
                                                   int width, int height, const void* image,
                                                   AtlasLocator* atlasLocator) {
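    // The strategy below proceeds in stages: (1) try to fit the image into an already-active
    // page; (2) if all pages are active, recycle the least-recently-used plot whose uploads
    // have already flushed, otherwise activate a new page and upload there; (3) as a last
    // resort, replace an LRU plot not referenced by the draw currently being prepared and
    // upload it inline, or return kTryAgain so the caller can enqueue its draw and retry.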
    if (width > fPlotWidth || height > fPlotHeight) {
        return ErrorCode::kError;
    }

    // Look through each page to see if we can upload without having to flush.
    // We prioritize uploading to the first pages, not the most recently used, to make it
    // easier to remove unused pages in reverse page order.
    for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
        if (this->uploadToPage(pageIdx, target, width, height, image, atlasLocator)) {
            return ErrorCode::kSucceeded;
        }
    }

    // If the above fails, then see if the least recently used plot per page has already been
    // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise.
    // We wait until we've grown to the full number of pages to begin evicting already flushed
    // plots so that we can maximize the opportunity for reuse.
    // As before, we prioritize uploading to the first pages, not the most recently used.
    if (fNumActivePages == this->maxPages()) {
        for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
            Plot* plot = fPages[pageIdx].fPlotList.tail();
            SkASSERT(plot);
            if (plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) {
                this->processEvictionAndResetRects(plot);
                SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
                         plot->bpp());
                SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, atlasLocator);
                SkASSERT(verify);
                if (!this->updatePlot(target, atlasLocator, plot)) {
                    return ErrorCode::kError;
                }
                return ErrorCode::kSucceeded;
            }
        }
    } else {
        // If we haven't activated all the available pages, try to create a new one and add to it
        if (!this->activateNewPage(resourceProvider)) {
            return ErrorCode::kError;
        }

        if (this->uploadToPage(fNumActivePages-1, target, width, height, image, atlasLocator)) {
            return ErrorCode::kSucceeded;
        } else {
            // If we fail to upload to a newly activated page then something has gone terribly
            // wrong - return an error
            return ErrorCode::kError;
        }
    }

    if (!fNumActivePages) {
        return ErrorCode::kError;
    }

    // Try to find a plot that we can perform an inline upload to.
    // We prioritize this upload in reverse order of pages to counterbalance the order above.
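    // Unlike the ASAP uploads above, an inline upload executes at its recorded position in the
    // command stream, between draws, so it can safely replace a plot that earlier draws in this
    // flush still reference (those draws read the plot's pre-upload contents).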
    Plot* plot = nullptr;
    for (int pageIdx = ((int)fNumActivePages)-1; pageIdx >= 0; --pageIdx) {
        Plot* currentPlot = fPages[pageIdx].fPlotList.tail();
        if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) {
            plot = currentPlot;
            break;
        }
    }

    // If we can't find a plot that is not used in a draw currently being prepared by an op, then
    // we have to fail. This gives the op a chance to enqueue the draw, and call back into this
    // function. When that draw is enqueued, the draw token advances, and the subsequent call will
    // continue past this branch and prepare an inline upload that will occur after the enqueued
    // draw which references the plot's pre-upload content.
    if (!plot) {
        return ErrorCode::kTryAgain;
    }

    this->processEviction(plot->plotLocator());
    int pageIdx = plot->pageIndex();
    fPages[pageIdx].fPlotList.remove(plot);
    sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->plotIndex()];
    newPlot.reset(plot->clone());

    fPages[pageIdx].fPlotList.addToHead(newPlot.get());
    SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
             newPlot->bpp());
    SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, atlasLocator);
    SkASSERT(verify);

    // Note that this plot will be uploaded inline with the draws whereas the
    // one it displaced most likely was uploaded ASAP.
    // With C++14 we could move the sk_sp into the lambda to only ref once.
    sk_sp<Plot> plotsp(SkRef(newPlot.get()));

    GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
    SkASSERT(proxy && proxy->isInstantiated());

    GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
            [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                plotsp->uploadToTexture(writePixels, proxy);
            });
    newPlot->setLastUploadToken(lastUploadToken);

    atlasLocator->updatePlotLocator(newPlot->plotLocator());
    SkDEBUGCODE(this->validate(*atlasLocator);)

    return ErrorCode::kSucceeded;
}

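// Reclaims atlas space at flush boundaries: ages plots, evicts plots in the last page that
// have gone stale, and when the last page is only lightly used, evicts its remaining plots
// (along with stale plots in earlier pages that can absorb future uploads) so the last page
// can eventually be deactivated.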
void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
    if (fNumActivePages < 1) {
        fPrevFlushToken = startTokenForNextFlush;
        return;
    }

    // For all plots, reset number of flushes since used if used this frame.
    PlotList::Iter plotIter;
    bool atlasUsedThisFlush = false;
    for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
        plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        while (Plot* plot = plotIter.get()) {
            // Reset number of flushes since used
            if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->resetFlushesSinceLastUsed();
                atlasUsedThisFlush = true;
            }

            plotIter.next();
        }
    }

    if (atlasUsedThisFlush) {
        fFlushesSinceLastUse = 0;
    } else {
        ++fFlushesSinceLastUse;
    }

    // We only try to compact if the atlas was used in the recently completed flush or
    // hasn't been used in a long time.
    // This is to handle the case where a lot of text or path rendering has occurred but then just
    // a blinking cursor is drawn.
    if (atlasUsedThisFlush || fFlushesSinceLastUse > kAtlasRecentlyUsedCount) {
        SkTArray<Plot*> availablePlots;
        uint32_t lastPageIndex = fNumActivePages - 1;

        // For all plots but the last one, update number of flushes since used, and check to see
        // if there are any in the first pages that the last page can safely upload to.
        for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("page %d: ", pageIndex);
            }
#endif
            plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // Update number of flushes since plot was last used
                // We only increment the 'sinceLastUsed' count for flushes where the atlas was used
                // to avoid deleting everything when we return to text drawing in the blinking
                // cursor case
                if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                    plot->incFlushesSinceLastUsed();
                }

#ifdef DUMP_ATLAS_DATA
                if (gDumpAtlasData) {
                    SkDebugf("%d ", plot->flushesSinceLastUsed());
                }
#endif
                // Count plots we can potentially upload to in all pages except the last one
                // (the potential compactee).
                if (plot->flushesSinceLastUsed() > kPlotRecentlyUsedCount) {
                    availablePlots.push_back() = plot;
                }

                plotIter.next();
            }
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("\n");
            }
#endif
        }

        // Count recently used plots in the last page and evict any that are no longer in use.
        // Since we prioritize uploading to the first pages, this will eventually
        // clear out usage of this page unless we have a large need.
        plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        unsigned int usedPlots = 0;
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("page %d: ", lastPageIndex);
        }
#endif
        while (Plot* plot = plotIter.get()) {
            // Update number of flushes since plot was last used
            if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->incFlushesSinceLastUsed();
            }

#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("%d ", plot->flushesSinceLastUsed());
            }
#endif
            // If this plot was used recently
            if (plot->flushesSinceLastUsed() <= kPlotRecentlyUsedCount) {
                usedPlots++;
            } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
                // otherwise if aged out just evict it.
                this->processEvictionAndResetRects(plot);
            }
            plotIter.next();
        }
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("\n");
        }
#endif

        // If recently used plots in the last page are using less than a quarter of the page, try
        // to evict them if there's available space in earlier pages. Since we prioritize uploading
        // to the first pages, this will eventually clear out usage of this page unless we have a
        // large need.
        if (availablePlots.count() && usedPlots && usedPlots <= fNumPlots / 4) {
            plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // If this plot was used recently
                if (plot->flushesSinceLastUsed() <= kPlotRecentlyUsedCount) {
                    // See if there's room in an earlier page and if so evict.
                    // We need to be somewhat harsh here so that a handful of plots that are
                    // consistently in use don't end up locking the page in memory.
                    if (availablePlots.count() > 0) {
                        this->processEvictionAndResetRects(plot);
                        this->processEvictionAndResetRects(availablePlots.back());
                        availablePlots.pop_back();
                        --usedPlots;
                    }
                    if (!usedPlots || !availablePlots.count()) {
                        break;
                    }
                }
                plotIter.next();
            }
        }

        // If none of the plots in the last page have been used recently, delete it.
        if (!usedPlots) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("delete %d\n", fNumActivePages-1);
            }
#endif
            this->deactivateLastPage();
            fFlushesSinceLastUse = 0;
        }
    }

    fPrevFlushToken = startTokenForNextFlush;
}

bool GrDrawOpAtlas::createPages(
        GrProxyProvider* proxyProvider, GenerationCounter* generationCounter) {
    SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight));

    SkISize dims = {fTextureWidth, fTextureHeight};

    int numPlotsX = fTextureWidth/fPlotWidth;
    int numPlotsY = fTextureHeight/fPlotHeight;

    for (uint32_t i = 0; i < this->maxPages(); ++i) {
        skgpu::Swizzle swizzle = proxyProvider->caps()->getReadSwizzle(fFormat, fColorType);
        if (GrColorTypeIsAlphaOnly(fColorType)) {
            swizzle = skgpu::Swizzle::Concat(swizzle, skgpu::Swizzle("aaaa"));
        }
        sk_sp<GrSurfaceProxy> proxy = proxyProvider->createProxy(
                fFormat, dims, GrRenderable::kNo, 1, GrMipmapped::kNo, SkBackingFit::kExact,
                SkBudgeted::kYes, GrProtected::kNo, GrInternalSurfaceFlags::kNone,
                GrSurfaceProxy::UseAllocator::kNo);
        if (!proxy) {
            return false;
        }
        fViews[i] = GrSurfaceProxyView(std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle);

        // set up allocated plots
        fPages[i].fPlotArray = std::make_unique<sk_sp<Plot>[]>(numPlotsX * numPlotsY);

        sk_sp<Plot>* currPlot = fPages[i].fPlotArray.get();
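        // Plots are created starting from the lower-right corner of the page (x and y count
        // down while the plot index counts up), which matches the index mapping checked in
        // validate() above.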
        for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
            for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
                uint32_t plotIndex = r * numPlotsX + c;
                currPlot->reset(new Plot(
                        i, plotIndex, generationCounter, x, y, fPlotWidth, fPlotHeight,
                        fColorType));

                // build LRU list
                fPages[i].fPlotList.addToHead(currPlot->get());
                ++currPlot;
            }
        }
    }

    return true;
}

bool GrDrawOpAtlas::activateNewPage(GrResourceProvider* resourceProvider) {
    SkASSERT(fNumActivePages < this->maxPages());

    if (!fViews[fNumActivePages].proxy()->instantiate(resourceProvider)) {
        return false;
    }

#ifdef DUMP_ATLAS_DATA
    if (gDumpAtlasData) {
        SkDebugf("activated page#: %d\n", fNumActivePages);
    }
#endif

    ++fNumActivePages;
    return true;
}

inline void GrDrawOpAtlas::deactivateLastPage() {
    SkASSERT(fNumActivePages);

    uint32_t lastPageIndex = fNumActivePages - 1;

    int numPlotsX = fTextureWidth/fPlotWidth;
    int numPlotsY = fTextureHeight/fPlotHeight;

    fPages[lastPageIndex].fPlotList.reset();
    for (int r = 0; r < numPlotsY; ++r) {
        for (int c = 0; c < numPlotsX; ++c) {
            uint32_t plotIndex = r * numPlotsX + c;

            Plot* currPlot = fPages[lastPageIndex].fPlotArray[plotIndex].get();
            currPlot->resetRects();
            currPlot->resetFlushesSinceLastUsed();

            // rebuild the LRU list
            SkDEBUGCODE(currPlot->fPrev = currPlot->fNext = nullptr);
            SkDEBUGCODE(currPlot->fList = nullptr);
            fPages[lastPageIndex].fPlotList.addToHead(currPlot);
        }
    }

    // remove ref to the backing texture
    fViews[lastPageIndex].proxy()->deinstantiate();
    --fNumActivePages;
}

GrDrawOpAtlasConfig::GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes) {
    static const SkISize kARGBDimensions[] = {
        {256, 256},   // maxBytes < 2^19
        {512, 256},   // 2^19 <= maxBytes < 2^20
        {512, 512},   // 2^20 <= maxBytes < 2^21
        {1024, 512},  // 2^21 <= maxBytes < 2^22
        {1024, 1024}, // 2^22 <= maxBytes < 2^23
        {2048, 1024}, // 2^23 <= maxBytes
    };

    // Index 0 corresponds to maxBytes of 2^18, so start by dividing it by that
    maxBytes >>= 18;
    // Take the floor of the log to get the index
    int index = maxBytes > 0
        ? SkTPin<int>(SkPrevLog2(maxBytes), 0, SK_ARRAY_COUNT(kARGBDimensions) - 1)
        : 0;
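    // Worked example: maxBytes == 2^21 (2 MB) shifts down to 8, SkPrevLog2(8) == 3, and
    // kARGBDimensions[3] selects 1024x512, matching the table comment above.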

    SkASSERT(kARGBDimensions[index].width() <= kMaxAtlasDim);
    SkASSERT(kARGBDimensions[index].height() <= kMaxAtlasDim);
    fARGBDimensions.set(std::min<int>(kARGBDimensions[index].width(), maxTextureSize),
                        std::min<int>(kARGBDimensions[index].height(), maxTextureSize));
    fMaxTextureSize = std::min<int>(maxTextureSize, kMaxAtlasDim);
}

SkISize GrDrawOpAtlasConfig::atlasDimensions(GrMaskFormat type) const {
    if (kA8_GrMaskFormat == type) {
        // A8 is always 2x the ARGB dimensions, clamped to the max allowed texture size
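        // e.g. ARGB dimensions of 1024x1024 imply a 2048x2048 A8 atlas, assuming
        // fMaxTextureSize is at least 2048.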
        return { std::min<int>(2 * fARGBDimensions.width(), fMaxTextureSize),
                 std::min<int>(2 * fARGBDimensions.height(), fMaxTextureSize) };
    } else {
        return fARGBDimensions;
    }
}

SkISize GrDrawOpAtlasConfig::plotDimensions(GrMaskFormat type) const {
    if (kA8_GrMaskFormat == type) {
        SkISize atlasDimensions = this->atlasDimensions(type);
        // For A8 we want to grow the plots at larger texture sizes to accept more of the
        // larger SDF glyphs. Since the largest SDF glyph can be 170x170 with padding, this
        // allows us to pack 3 in a 512x256 plot, or 9 in a 512x512 plot.

        // This will give us 512x256 plots for 2048x1024, 512x512 plots for 2048x2048,
        // and 256x256 plots otherwise.
        int plotWidth = atlasDimensions.width() >= 2048 ? 512 : 256;
        int plotHeight = atlasDimensions.height() >= 2048 ? 512 : 256;

        return { plotWidth, plotHeight };
    } else {
        // ARGB and LCD always use 256x256 plots -- this has been shown to be faster
        return { 256, 256 };
    }
}