1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/GrDrawOpAtlas.h"
9
10 #include <memory>
11
12 #include "include/private/SkTPin.h"
13 #include "src/core/SkOpts.h"
14 #include "src/gpu/GrBackendUtils.h"
15 #include "src/gpu/GrOnFlushResourceProvider.h"
16 #include "src/gpu/GrOpFlushState.h"
17 #include "src/gpu/GrProxyProvider.h"
18 #include "src/gpu/GrResourceProvider.h"
19 #include "src/gpu/GrResourceProviderPriv.h"
20 #include "src/gpu/GrSurfaceProxyPriv.h"
21 #include "src/gpu/GrTexture.h"
22 #include "src/gpu/GrTracing.h"
23
#ifdef DUMP_ATLAS_DATA
// Flip to true (e.g. from a debugger) to log per-page plot usage during compaction.
static bool gDumpAtlasData = false;
#endif
27
#ifdef SK_DEBUG
void GrDrawOpAtlas::validate(const AtlasLocator& atlasLocator) const {
    // Check that the plot index encoded in the locator agrees with the plot
    // implied by the glyph rectangle's top-left corner. Plots are numbered in
    // reverse row/column order (see createPages), hence the mirrored formula.
    int plotsPerRow = fTextureWidth / fPlotWidth;
    int plotsPerCol = fTextureHeight / fPlotHeight;

    auto corner = atlasLocator.topLeft();
    int col = corner.x() / fPlotWidth;
    int row = corner.y() / fPlotHeight;
    int expectedIndex = (plotsPerCol - row - 1) * plotsPerRow + (plotsPerRow - col - 1);
    SkASSERT(atlasLocator.plotIndex() == expectedIndex);
}
#endif
41
42 // When proxy allocation is deferred until flush time the proxies acting as atlases require
43 // special handling. This is because the usage that can be determined from the ops themselves
44 // isn't sufficient. Independent of the ops there will be ASAP and inline uploads to the
45 // atlases. Extending the usage interval of any op that uses an atlas to the start of the
46 // flush (as is done for proxies that are used for sw-generated masks) also won't work because
47 // the atlas persists even beyond the last use in an op - for a given flush. Given this, atlases
48 // must explicitly manage the lifetime of their backing proxies via the onFlushCallback system
49 // (which calls this method).
instantiate(GrOnFlushResourceProvider * onFlushResourceProvider)50 void GrDrawOpAtlas::instantiate(GrOnFlushResourceProvider* onFlushResourceProvider) {
51 for (uint32_t i = 0; i < fNumActivePages; ++i) {
52 // All the atlas pages are now instantiated at flush time in the activeNewPage method.
53 SkASSERT(fViews[i].proxy() && fViews[i].proxy()->isInstantiated());
54 }
55 }
56
Make(GrProxyProvider * proxyProvider,const GrBackendFormat & format,GrColorType colorType,int width,int height,int plotWidth,int plotHeight,GenerationCounter * generationCounter,AllowMultitexturing allowMultitexturing,EvictionCallback * evictor)57 std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrProxyProvider* proxyProvider,
58 const GrBackendFormat& format,
59 GrColorType colorType, int width,
60 int height, int plotWidth, int plotHeight,
61 GenerationCounter* generationCounter,
62 AllowMultitexturing allowMultitexturing,
63 EvictionCallback* evictor) {
64 if (!format.isValid()) {
65 return nullptr;
66 }
67
68 std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(proxyProvider, format, colorType,
69 width, height, plotWidth, plotHeight,
70 generationCounter,
71 allowMultitexturing));
72 if (!atlas->getViews()[0].proxy()) {
73 return nullptr;
74 }
75
76 if (evictor != nullptr) {
77 atlas->fEvictionCallbacks.emplace_back(evictor);
78 }
79 return atlas;
80 }
81
82 ////////////////////////////////////////////////////////////////////////////////
Plot(int pageIndex,int plotIndex,GenerationCounter * generationCounter,int offX,int offY,int width,int height,GrColorType colorType)83 GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, GenerationCounter* generationCounter,
84 int offX, int offY, int width, int height, GrColorType colorType)
85 : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
86 , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
87 , fFlushesSinceLastUse(0)
88 , fPageIndex(pageIndex)
89 , fPlotIndex(plotIndex)
90 , fGenerationCounter(generationCounter)
91 , fGenID(fGenerationCounter->next())
92 , fPlotLocator(fPageIndex, fPlotIndex, fGenID)
93 , fData(nullptr)
94 , fWidth(width)
95 , fHeight(height)
96 , fX(offX)
97 , fY(offY)
98 , fRectanizer(width, height)
99 , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
100 , fColorType(colorType)
101 , fBytesPerPixel(GrColorTypeBytesPerPixel(colorType))
102 #ifdef SK_DEBUG
103 , fDirty(false)
104 #endif
105 {
106 // We expect the allocated dimensions to be a multiple of 4 bytes
107 SkASSERT(((width*fBytesPerPixel) & 0x3) == 0);
108 // The padding for faster uploads only works for 1, 2 and 4 byte texels
109 SkASSERT(fBytesPerPixel != 3 && fBytesPerPixel <= 4);
110 fDirtyRect.setEmpty();
111 }
112
~Plot()113 GrDrawOpAtlas::Plot::~Plot() {
114 sk_free(fData);
115 }
116
addSubImage(int width,int height,const void * image,AtlasLocator * atlasLocator)117 bool GrDrawOpAtlas::Plot::addSubImage(
118 int width, int height, const void* image, AtlasLocator* atlasLocator) {
119 SkASSERT(width <= fWidth && height <= fHeight);
120
121 SkIPoint16 loc;
122 if (!fRectanizer.addRect(width, height, &loc)) {
123 return false;
124 }
125
126 GrIRect16 rect = GrIRect16::MakeXYWH(loc.fX, loc.fY, width, height);
127
128 if (!fData) {
129 fData = reinterpret_cast<unsigned char*>(
130 sk_calloc_throw(fBytesPerPixel * fWidth * fHeight));
131 }
132 size_t rowBytes = width * fBytesPerPixel;
133 const unsigned char* imagePtr = (const unsigned char*)image;
134 // point ourselves at the right starting spot
135 unsigned char* dataPtr = fData;
136 dataPtr += fBytesPerPixel * fWidth * rect.fTop;
137 dataPtr += fBytesPerPixel * rect.fLeft;
138 // copy into the data buffer, swizzling as we go if this is ARGB data
139 if (4 == fBytesPerPixel && kN32_SkColorType == kBGRA_8888_SkColorType) {
140 for (int i = 0; i < height; ++i) {
141 SkOpts::RGBA_to_BGRA((uint32_t*)dataPtr, (const uint32_t*)imagePtr, width);
142 dataPtr += fBytesPerPixel * fWidth;
143 imagePtr += rowBytes;
144 }
145 } else {
146 for (int i = 0; i < height; ++i) {
147 memcpy(dataPtr, imagePtr, rowBytes);
148 dataPtr += fBytesPerPixel * fWidth;
149 imagePtr += rowBytes;
150 }
151 }
152
153 fDirtyRect.join({rect.fLeft, rect.fTop, rect.fRight, rect.fBottom});
154
155 rect.offset(fOffset.fX, fOffset.fY);
156 atlasLocator->updateRect(rect);
157 SkDEBUGCODE(fDirty = true;)
158
159 return true;
160 }
161
uploadToTexture(GrDeferredTextureUploadWritePixelsFn & writePixels,GrTextureProxy * proxy)162 void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
163 GrTextureProxy* proxy) {
164 // We should only be issuing uploads if we are in fact dirty
165 SkASSERT(fDirty && fData && proxy && proxy->peekTexture());
166 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
167 size_t rowBytes = fBytesPerPixel * fWidth;
168 const unsigned char* dataPtr = fData;
169 // Clamp to 4-byte aligned boundaries
170 unsigned int clearBits = 0x3 / fBytesPerPixel;
171 fDirtyRect.fLeft &= ~clearBits;
172 fDirtyRect.fRight += clearBits;
173 fDirtyRect.fRight &= ~clearBits;
174 SkASSERT(fDirtyRect.fRight <= fWidth);
175 // Set up dataPtr
176 dataPtr += rowBytes * fDirtyRect.fTop;
177 dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
178
179 writePixels(proxy,
180 fDirtyRect.makeOffset(fOffset.fX, fOffset.fY),
181 fColorType,
182 dataPtr,
183 rowBytes);
184 fDirtyRect.setEmpty();
185 SkDEBUGCODE(fDirty = false;)
186 }
187
resetRects()188 void GrDrawOpAtlas::Plot::resetRects() {
189 fRectanizer.reset();
190
191 fGenID = fGenerationCounter->next();
192 fPlotLocator = PlotLocator(fPageIndex, fPlotIndex, fGenID);
193 fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
194 fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();
195
196 // zero out the plot
197 if (fData) {
198 sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
199 }
200
201 fDirtyRect.setEmpty();
202 SkDEBUGCODE(fDirty = false;)
203 }
204
205 ///////////////////////////////////////////////////////////////////////////////
206
GrDrawOpAtlas(GrProxyProvider * proxyProvider,const GrBackendFormat & format,GrColorType colorType,int width,int height,int plotWidth,int plotHeight,GenerationCounter * generationCounter,AllowMultitexturing allowMultitexturing)207 GrDrawOpAtlas::GrDrawOpAtlas(GrProxyProvider* proxyProvider, const GrBackendFormat& format,
208 GrColorType colorType, int width, int height,
209 int plotWidth, int plotHeight, GenerationCounter* generationCounter,
210 AllowMultitexturing allowMultitexturing)
211 : fFormat(format)
212 , fColorType(colorType)
213 , fTextureWidth(width)
214 , fTextureHeight(height)
215 , fPlotWidth(plotWidth)
216 , fPlotHeight(plotHeight)
217 , fGenerationCounter(generationCounter)
218 , fAtlasGeneration(fGenerationCounter->next())
219 , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
220 , fFlushesSinceLastUse(0)
221 , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? kMaxMultitexturePages : 1)
222 , fNumActivePages(0) {
223 int numPlotsX = width/plotWidth;
224 int numPlotsY = height/plotHeight;
225 SkASSERT(numPlotsX * numPlotsY <= GrDrawOpAtlas::kMaxPlots);
226 SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
227 SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);
228
229 fNumPlots = numPlotsX * numPlotsY;
230
231 this->createPages(proxyProvider, generationCounter);
232 }
233
processEviction(PlotLocator plotLocator)234 inline void GrDrawOpAtlas::processEviction(PlotLocator plotLocator) {
235 for (EvictionCallback* evictor : fEvictionCallbacks) {
236 evictor->evict(plotLocator);
237 }
238
239 fAtlasGeneration = fGenerationCounter->next();
240 }
241
updatePlot(GrDeferredUploadTarget * target,AtlasLocator * atlasLocator,Plot * plot)242 inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target,
243 AtlasLocator* atlasLocator, Plot* plot) {
244 int pageIdx = plot->pageIndex();
245 this->makeMRU(plot, pageIdx);
246
247 // If our most recent upload has already occurred then we have to insert a new
248 // upload. Otherwise, we already have a scheduled upload that hasn't yet ocurred.
249 // This new update will piggy back on that previously scheduled update.
250 if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
251 // With c+14 we could move sk_sp into lamba to only ref once.
252 sk_sp<Plot> plotsp(SkRef(plot));
253
254 GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
255 SkASSERT(proxy && proxy->isInstantiated()); // This is occurring at flush time
256
257 GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
258 [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
259 plotsp->uploadToTexture(writePixels, proxy);
260 });
261 plot->setLastUploadToken(lastUploadToken);
262 }
263 atlasLocator->updatePlotLocator(plot->plotLocator());
264 SkDEBUGCODE(this->validate(*atlasLocator);)
265 return true;
266 }
267
uploadToPage(unsigned int pageIdx,GrDeferredUploadTarget * target,int width,int height,const void * image,AtlasLocator * atlasLocator)268 bool GrDrawOpAtlas::uploadToPage(unsigned int pageIdx, GrDeferredUploadTarget* target, int width,
269 int height, const void* image, AtlasLocator* atlasLocator) {
270 SkASSERT(fViews[pageIdx].proxy() && fViews[pageIdx].proxy()->isInstantiated());
271
272 // look through all allocated plots for one we can share, in Most Recently Refed order
273 PlotList::Iter plotIter;
274 plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);
275
276 for (Plot* plot = plotIter.get(); plot; plot = plotIter.next()) {
277 SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
278 plot->bpp());
279
280 if (plot->addSubImage(width, height, image, atlasLocator)) {
281 return this->updatePlot(target, atlasLocator, plot);
282 }
283 }
284
285 return false;
286 }
287
// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kPlotRecentlyUsedCount = 32;
// Same idea, but for the atlas as a whole: after this many flushes without any
// plot being used, compact() will still attempt cleanup (see compact()).
static constexpr auto kAtlasRecentlyUsedCount = 128;
296
addToAtlas(GrResourceProvider * resourceProvider,GrDeferredUploadTarget * target,int width,int height,const void * image,AtlasLocator * atlasLocator)297 GrDrawOpAtlas::ErrorCode GrDrawOpAtlas::addToAtlas(GrResourceProvider* resourceProvider,
298 GrDeferredUploadTarget* target,
299 int width, int height, const void* image,
300 AtlasLocator* atlasLocator) {
301 if (width > fPlotWidth || height > fPlotHeight) {
302 return ErrorCode::kError;
303 }
304
305 // Look through each page to see if we can upload without having to flush
306 // We prioritize this upload to the first pages, not the most recently used, to make it easier
307 // to remove unused pages in reverse page order.
308 for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
309 if (this->uploadToPage(pageIdx, target, width, height, image, atlasLocator)) {
310 return ErrorCode::kSucceeded;
311 }
312 }
313
314 // If the above fails, then see if the least recently used plot per page has already been
315 // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise.
316 // We wait until we've grown to the full number of pages to begin evicting already flushed
317 // plots so that we can maximize the opportunity for reuse.
318 // As before we prioritize this upload to the first pages, not the most recently used.
319 if (fNumActivePages == this->maxPages()) {
320 for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
321 Plot* plot = fPages[pageIdx].fPlotList.tail();
322 SkASSERT(plot);
323 if (plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) {
324 this->processEvictionAndResetRects(plot);
325 SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
326 plot->bpp());
327 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, atlasLocator);
328 SkASSERT(verify);
329 if (!this->updatePlot(target, atlasLocator, plot)) {
330 return ErrorCode::kError;
331 }
332 return ErrorCode::kSucceeded;
333 }
334 }
335 } else {
336 // If we haven't activated all the available pages, try to create a new one and add to it
337 if (!this->activateNewPage(resourceProvider)) {
338 return ErrorCode::kError;
339 }
340
341 if (this->uploadToPage(fNumActivePages-1, target, width, height, image, atlasLocator)) {
342 return ErrorCode::kSucceeded;
343 } else {
344 // If we fail to upload to a newly activated page then something has gone terribly
345 // wrong - return an error
346 return ErrorCode::kError;
347 }
348 }
349
350 if (!fNumActivePages) {
351 return ErrorCode::kError;
352 }
353
354 // Try to find a plot that we can perform an inline upload to.
355 // We prioritize this upload in reverse order of pages to counterbalance the order above.
356 Plot* plot = nullptr;
357 for (int pageIdx = ((int)fNumActivePages)-1; pageIdx >= 0; --pageIdx) {
358 Plot* currentPlot = fPages[pageIdx].fPlotList.tail();
359 if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) {
360 plot = currentPlot;
361 break;
362 }
363 }
364
365 // If we can't find a plot that is not used in a draw currently being prepared by an op, then
366 // we have to fail. This gives the op a chance to enqueue the draw, and call back into this
367 // function. When that draw is enqueued, the draw token advances, and the subsequent call will
368 // continue past this branch and prepare an inline upload that will occur after the enqueued
369 // draw which references the plot's pre-upload content.
370 if (!plot) {
371 return ErrorCode::kTryAgain;
372 }
373
374 this->processEviction(plot->plotLocator());
375 int pageIdx = plot->pageIndex();
376 fPages[pageIdx].fPlotList.remove(plot);
377 sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->plotIndex()];
378 newPlot.reset(plot->clone());
379
380 fPages[pageIdx].fPlotList.addToHead(newPlot.get());
381 SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
382 newPlot->bpp());
383 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, atlasLocator);
384 SkASSERT(verify);
385
386 // Note that this plot will be uploaded inline with the draws whereas the
387 // one it displaced most likely was uploaded ASAP.
388 // With c++14 we could move sk_sp into lambda to only ref once.
389 sk_sp<Plot> plotsp(SkRef(newPlot.get()));
390
391 GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
392 SkASSERT(proxy && proxy->isInstantiated());
393
394 GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
395 [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
396 plotsp->uploadToTexture(writePixels, proxy);
397 });
398 newPlot->setLastUploadToken(lastUploadToken);
399
400 atlasLocator->updatePlotLocator(newPlot->plotLocator());
401 SkDEBUGCODE(this->validate(*atlasLocator);)
402
403 return ErrorCode::kSucceeded;
404 }
405
compact(GrDeferredUploadToken startTokenForNextFlush)406 void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
407 if (fNumActivePages < 1) {
408 fPrevFlushToken = startTokenForNextFlush;
409 return;
410 }
411
412 // For all plots, reset number of flushes since used if used this frame.
413 PlotList::Iter plotIter;
414 bool atlasUsedThisFlush = false;
415 for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
416 plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
417 while (Plot* plot = plotIter.get()) {
418 // Reset number of flushes since used
419 if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
420 plot->resetFlushesSinceLastUsed();
421 atlasUsedThisFlush = true;
422 }
423
424 plotIter.next();
425 }
426 }
427
428 if (atlasUsedThisFlush) {
429 fFlushesSinceLastUse = 0;
430 } else {
431 ++fFlushesSinceLastUse;
432 }
433
434 // We only try to compact if the atlas was used in the recently completed flush or
435 // hasn't been used in a long time.
436 // This is to handle the case where a lot of text or path rendering has occurred but then just
437 // a blinking cursor is drawn.
438 if (atlasUsedThisFlush || fFlushesSinceLastUse > kAtlasRecentlyUsedCount) {
439 SkTArray<Plot*> availablePlots;
440 uint32_t lastPageIndex = fNumActivePages - 1;
441
442 // For all plots but the last one, update number of flushes since used, and check to see
443 // if there are any in the first pages that the last page can safely upload to.
444 for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) {
445 #ifdef DUMP_ATLAS_DATA
446 if (gDumpAtlasData) {
447 SkDebugf("page %d: ", pageIndex);
448 }
449 #endif
450 plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
451 while (Plot* plot = plotIter.get()) {
452 // Update number of flushes since plot was last used
453 // We only increment the 'sinceLastUsed' count for flushes where the atlas was used
454 // to avoid deleting everything when we return to text drawing in the blinking
455 // cursor case
456 if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
457 plot->incFlushesSinceLastUsed();
458 }
459
460 #ifdef DUMP_ATLAS_DATA
461 if (gDumpAtlasData) {
462 SkDebugf("%d ", plot->flushesSinceLastUsed());
463 }
464 #endif
465 // Count plots we can potentially upload to in all pages except the last one
466 // (the potential compactee).
467 if (plot->flushesSinceLastUsed() > kPlotRecentlyUsedCount) {
468 availablePlots.push_back() = plot;
469 }
470
471 plotIter.next();
472 }
473 #ifdef DUMP_ATLAS_DATA
474 if (gDumpAtlasData) {
475 SkDebugf("\n");
476 }
477 #endif
478 }
479
480 // Count recently used plots in the last page and evict any that are no longer in use.
481 // Since we prioritize uploading to the first pages, this will eventually
482 // clear out usage of this page unless we have a large need.
483 plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
484 unsigned int usedPlots = 0;
485 #ifdef DUMP_ATLAS_DATA
486 if (gDumpAtlasData) {
487 SkDebugf("page %d: ", lastPageIndex);
488 }
489 #endif
490 while (Plot* plot = plotIter.get()) {
491 // Update number of flushes since plot was last used
492 if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
493 plot->incFlushesSinceLastUsed();
494 }
495
496 #ifdef DUMP_ATLAS_DATA
497 if (gDumpAtlasData) {
498 SkDebugf("%d ", plot->flushesSinceLastUsed());
499 }
500 #endif
501 // If this plot was used recently
502 if (plot->flushesSinceLastUsed() <= kPlotRecentlyUsedCount) {
503 usedPlots++;
504 } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
505 // otherwise if aged out just evict it.
506 this->processEvictionAndResetRects(plot);
507 }
508 plotIter.next();
509 }
510 #ifdef DUMP_ATLAS_DATA
511 if (gDumpAtlasData) {
512 SkDebugf("\n");
513 }
514 #endif
515
516 // If recently used plots in the last page are using less than a quarter of the page, try
517 // to evict them if there's available space in earlier pages. Since we prioritize uploading
518 // to the first pages, this will eventually clear out usage of this page unless we have a
519 // large need.
520 if (availablePlots.count() && usedPlots && usedPlots <= fNumPlots / 4) {
521 plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
522 while (Plot* plot = plotIter.get()) {
523 // If this plot was used recently
524 if (plot->flushesSinceLastUsed() <= kPlotRecentlyUsedCount) {
525 // See if there's room in an earlier page and if so evict.
526 // We need to be somewhat harsh here so that a handful of plots that are
527 // consistently in use don't end up locking the page in memory.
528 if (availablePlots.count() > 0) {
529 this->processEvictionAndResetRects(plot);
530 this->processEvictionAndResetRects(availablePlots.back());
531 availablePlots.pop_back();
532 --usedPlots;
533 }
534 if (!usedPlots || !availablePlots.count()) {
535 break;
536 }
537 }
538 plotIter.next();
539 }
540 }
541
542 // If none of the plots in the last page have been used recently, delete it.
543 if (!usedPlots) {
544 #ifdef DUMP_ATLAS_DATA
545 if (gDumpAtlasData) {
546 SkDebugf("delete %d\n", fNumActivePages-1);
547 }
548 #endif
549 this->deactivateLastPage();
550 fFlushesSinceLastUse = 0;
551 }
552 }
553
554 fPrevFlushToken = startTokenForNextFlush;
555 }
556
createPages(GrProxyProvider * proxyProvider,GenerationCounter * generationCounter)557 bool GrDrawOpAtlas::createPages(
558 GrProxyProvider* proxyProvider, GenerationCounter* generationCounter) {
559 SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight));
560
561 SkISize dims = {fTextureWidth, fTextureHeight};
562
563 int numPlotsX = fTextureWidth/fPlotWidth;
564 int numPlotsY = fTextureHeight/fPlotHeight;
565
566 for (uint32_t i = 0; i < this->maxPages(); ++i) {
567 GrSwizzle swizzle = proxyProvider->caps()->getReadSwizzle(fFormat, fColorType);
568 if (GrColorTypeIsAlphaOnly(fColorType)) {
569 swizzle = GrSwizzle::Concat(swizzle, GrSwizzle("aaaa"));
570 }
571 sk_sp<GrSurfaceProxy> proxy = proxyProvider->createProxy(
572 fFormat, dims, GrRenderable::kNo, 1, GrMipmapped::kNo, SkBackingFit::kExact,
573 SkBudgeted::kYes, GrProtected::kNo, GrInternalSurfaceFlags::kNone,
574 GrSurfaceProxy::UseAllocator::kNo);
575 if (!proxy) {
576 return false;
577 }
578 fViews[i] = GrSurfaceProxyView(std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle);
579
580 // set up allocated plots
581 fPages[i].fPlotArray = std::make_unique<sk_sp<Plot>[]>(numPlotsX * numPlotsY);
582
583 sk_sp<Plot>* currPlot = fPages[i].fPlotArray.get();
584 for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
585 for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
586 uint32_t plotIndex = r * numPlotsX + c;
587 currPlot->reset(new Plot(
588 i, plotIndex, generationCounter, x, y, fPlotWidth, fPlotHeight, fColorType));
589
590 // build LRU list
591 fPages[i].fPlotList.addToHead(currPlot->get());
592 ++currPlot;
593 }
594 }
595
596 }
597
598 return true;
599 }
600
activateNewPage(GrResourceProvider * resourceProvider)601 bool GrDrawOpAtlas::activateNewPage(GrResourceProvider* resourceProvider) {
602 SkASSERT(fNumActivePages < this->maxPages());
603
604 if (!fViews[fNumActivePages].proxy()->instantiate(resourceProvider)) {
605 return false;
606 }
607
608 #ifdef DUMP_ATLAS_DATA
609 if (gDumpAtlasData) {
610 SkDebugf("activated page#: %d\n", fNumActivePages);
611 }
612 #endif
613
614 ++fNumActivePages;
615 return true;
616 }
617
618
deactivateLastPage()619 inline void GrDrawOpAtlas::deactivateLastPage() {
620 SkASSERT(fNumActivePages);
621
622 uint32_t lastPageIndex = fNumActivePages - 1;
623
624 int numPlotsX = fTextureWidth/fPlotWidth;
625 int numPlotsY = fTextureHeight/fPlotHeight;
626
627 fPages[lastPageIndex].fPlotList.reset();
628 for (int r = 0; r < numPlotsY; ++r) {
629 for (int c = 0; c < numPlotsX; ++c) {
630 uint32_t plotIndex = r * numPlotsX + c;
631
632 Plot* currPlot = fPages[lastPageIndex].fPlotArray[plotIndex].get();
633 currPlot->resetRects();
634 currPlot->resetFlushesSinceLastUsed();
635
636 // rebuild the LRU list
637 SkDEBUGCODE(currPlot->fPrev = currPlot->fNext = nullptr);
638 SkDEBUGCODE(currPlot->fList = nullptr);
639 fPages[lastPageIndex].fPlotList.addToHead(currPlot);
640 }
641 }
642
643 // remove ref to the backing texture
644 fViews[lastPageIndex].proxy()->deinstantiate();
645 --fNumActivePages;
646 }
647
GrDrawOpAtlasConfig(int maxTextureSize,size_t maxBytes)648 GrDrawOpAtlasConfig::GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes) {
649 static const SkISize kARGBDimensions[] = {
650 {256, 256}, // maxBytes < 2^19
651 {512, 256}, // 2^19 <= maxBytes < 2^20
652 {512, 512}, // 2^20 <= maxBytes < 2^21
653 {1024, 512}, // 2^21 <= maxBytes < 2^22
654 {1024, 1024}, // 2^22 <= maxBytes < 2^23
655 {2048, 1024}, // 2^23 <= maxBytes
656 };
657
658 // Index 0 corresponds to maxBytes of 2^18, so start by dividing it by that
659 maxBytes >>= 18;
660 // Take the floor of the log to get the index
661 int index = maxBytes > 0
662 ? SkTPin<int>(SkPrevLog2(maxBytes), 0, SK_ARRAY_COUNT(kARGBDimensions) - 1)
663 : 0;
664
665 SkASSERT(kARGBDimensions[index].width() <= kMaxAtlasDim);
666 SkASSERT(kARGBDimensions[index].height() <= kMaxAtlasDim);
667 fARGBDimensions.set(std::min<int>(kARGBDimensions[index].width(), maxTextureSize),
668 std::min<int>(kARGBDimensions[index].height(), maxTextureSize));
669 fMaxTextureSize = std::min<int>(maxTextureSize, kMaxAtlasDim);
670 }
671
atlasDimensions(GrMaskFormat type) const672 SkISize GrDrawOpAtlasConfig::atlasDimensions(GrMaskFormat type) const {
673 if (kA8_GrMaskFormat == type) {
674 // A8 is always 2x the ARGB dimensions, clamped to the max allowed texture size
675 return { std::min<int>(2 * fARGBDimensions.width(), fMaxTextureSize),
676 std::min<int>(2 * fARGBDimensions.height(), fMaxTextureSize) };
677 } else {
678 return fARGBDimensions;
679 }
680 }
681
plotDimensions(GrMaskFormat type) const682 SkISize GrDrawOpAtlasConfig::plotDimensions(GrMaskFormat type) const {
683 if (kA8_GrMaskFormat == type) {
684 SkISize atlasDimensions = this->atlasDimensions(type);
685 // For A8 we want to grow the plots at larger texture sizes to accept more of the
686 // larger SDF glyphs. Since the largest SDF glyph can be 170x170 with padding, this
687 // allows us to pack 3 in a 512x256 plot, or 9 in a 512x512 plot.
688
689 // This will give us 512x256 plots for 2048x1024, 512x512 plots for 2048x2048,
690 // and 256x256 plots otherwise.
691 int plotWidth = atlasDimensions.width() >= 2048 ? 512 : 256;
692 int plotHeight = atlasDimensions.height() >= 2048 ? 512 : 256;
693
694 return { plotWidth, plotHeight };
695 } else {
696 // ARGB and LCD always use 256x256 plots -- this has been shown to be faster
697 return { 256, 256 };
698 }
699 }
700