/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawOpAtlas.h"

#include <memory>

#include "include/private/SkTPin.h"
#include "src/core/SkOpts.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrResourceProviderPriv.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTracing.h"

#ifdef DUMP_ATLAS_DATA
static bool gDumpAtlasData = false;
#endif

#ifdef SK_DEBUG
void GrDrawOpAtlas::validate(const AtlasLocator& atlasLocator) const {
    // Verify that the plotIndex stored in the PlotLocator is consistent with the glyph rectangle
    int numPlotsX = fTextureWidth / fPlotWidth;
    int numPlotsY = fTextureHeight / fPlotHeight;

    int plotIndex = atlasLocator.plotIndex();
    auto topLeft = atlasLocator.topLeft();
    int plotX = topLeft.x() / fPlotWidth;
    int plotY = topLeft.y() / fPlotHeight;
    SkASSERT(plotIndex == (numPlotsY - plotY - 1) * numPlotsX + (numPlotsX - plotX - 1));
}
#endif

// When proxy allocation is deferred until flush time the proxies acting as atlases require
// special handling. This is because the usage that can be determined from the ops themselves
// isn't sufficient. Independent of the ops there will be ASAP and inline uploads to the
// atlases. Extending the usage interval of any op that uses an atlas to the start of the
// flush (as is done for proxies that are used for sw-generated masks) also won't work because
// the atlas persists even beyond the last use in an op - for a given flush. Given this, atlases
// must explicitly manage the lifetime of their backing proxies via the onFlushCallback system
// (which calls this method).
void GrDrawOpAtlas::instantiate(GrOnFlushResourceProvider* onFlushResourceProvider) {
    for (uint32_t i = 0; i < fNumActivePages; ++i) {
        // All the atlas pages are now instantiated at flush time in the activateNewPage method.
        SkASSERT(fViews[i].proxy() && fViews[i].proxy()->isInstantiated());
    }
}

std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrProxyProvider* proxyProvider,
                                                   const GrBackendFormat& format,
                                                   GrColorType colorType, int width,
                                                   int height, int plotWidth, int plotHeight,
                                                   GenerationCounter* generationCounter,
                                                   AllowMultitexturing allowMultitexturing,
#ifdef SK_ENABLE_SMALL_PAGE
                                                   int atlasPageNum,
#endif
                                                   EvictionCallback* evictor) {
    if (!format.isValid()) {
        return nullptr;
    }

    std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(proxyProvider, format, colorType,
                                                           width, height, plotWidth, plotHeight,
#ifdef SK_ENABLE_SMALL_PAGE
                                                           generationCounter, allowMultitexturing,
                                                           atlasPageNum));
#else
                                                           generationCounter, allowMultitexturing));
#endif
    if (!atlas->getViews()[0].proxy()) {
        return nullptr;
    }

    if (evictor != nullptr) {
        atlas->fEvictionCallbacks.emplace_back(evictor);
    }
    return atlas;
}

////////////////////////////////////////////////////////////////////////////////
GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, GenerationCounter* generationCounter,
                          int offX, int offY, int width, int height, GrColorType colorType)
        : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
        , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fPageIndex(pageIndex)
        , fPlotIndex(plotIndex)
        , fGenerationCounter(generationCounter)
        , fGenID(fGenerationCounter->next())
        , fPlotLocator(fPageIndex, fPlotIndex, fGenID)
        , fData(nullptr)
        , fWidth(width)
        , fHeight(height)
        , fX(offX)
        , fY(offY)
        , fRectanizer(width, height)
        , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
        , fColorType(colorType)
        , fBytesPerPixel(GrColorTypeBytesPerPixel(colorType))
#ifdef SK_DEBUG
        , fDirty(false)
#endif
{
    // We expect the allocated row size (width * bytes-per-pixel) to be a multiple of 4 bytes
    SkASSERT(((width*fBytesPerPixel) & 0x3) == 0);
    // The padding for faster uploads only works for 1, 2 and 4 byte texels
    SkASSERT(fBytesPerPixel != 3 && fBytesPerPixel <= 4);
    fDirtyRect.setEmpty();
}

GrDrawOpAtlas::Plot::~Plot() {
    sk_free(fData);
}

bool GrDrawOpAtlas::Plot::addSubImage(
        int width, int height, const void* image, AtlasLocator* atlasLocator) {
    SkASSERT(width <= fWidth && height <= fHeight);

    SkIPoint16 loc;
    if (!fRectanizer.addRect(width, height, &loc)) {
        return false;
    }

    GrIRect16 rect = GrIRect16::MakeXYWH(loc.fX, loc.fY, width, height);

    if (!fData) {
        fData = reinterpret_cast<unsigned char*>(
                sk_calloc_throw(fBytesPerPixel * fWidth * fHeight));
    }
    size_t rowBytes = width * fBytesPerPixel;
    const unsigned char* imagePtr = (const unsigned char*)image;
    // point ourselves at the right starting spot
    unsigned char* dataPtr = fData;
    dataPtr += fBytesPerPixel * fWidth * rect.fTop;
    dataPtr += fBytesPerPixel * rect.fLeft;
    // copy into the data buffer, swizzling as we go if this is ARGB data
    if (4 == fBytesPerPixel && kN32_SkColorType == kBGRA_8888_SkColorType) {
        for (int i = 0; i < height; ++i) {
            SkOpts::RGBA_to_BGRA((uint32_t*)dataPtr, (const uint32_t*)imagePtr, width);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    } else {
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, rowBytes);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    }

    fDirtyRect.join({rect.fLeft, rect.fTop, rect.fRight, rect.fBottom});

    rect.offset(fOffset.fX, fOffset.fY);
    atlasLocator->updateRect(rect);
    SkDEBUGCODE(fDirty = true;)

    return true;
}

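// Flushes the plot's CPU-side backing store to the given texture proxy. Only the dirty region
// is written; its left and right edges are first widened to 4-byte-aligned texel columns (see
// the padding note in the Plot constructor) before computing the source pointer.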
void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
                                          GrTextureProxy* proxy) {
    // We should only be issuing uploads if we are in fact dirty
    SkASSERT(fDirty && fData && proxy && proxy->peekTexture());
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    size_t rowBytes = fBytesPerPixel * fWidth;
    const unsigned char* dataPtr = fData;
    // Clamp to 4-byte aligned boundaries
    unsigned int clearBits = 0x3 / fBytesPerPixel;
    fDirtyRect.fLeft &= ~clearBits;
    fDirtyRect.fRight += clearBits;
    fDirtyRect.fRight &= ~clearBits;
    SkASSERT(fDirtyRect.fRight <= fWidth);
    // Set up dataPtr
    dataPtr += rowBytes * fDirtyRect.fTop;
    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;

    writePixels(proxy,
                fDirtyRect.makeOffset(fOffset.fX, fOffset.fY),
                fColorType,
                dataPtr,
                rowBytes);
    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

void GrDrawOpAtlas::Plot::resetRects() {
    fRectanizer.reset();

    fGenID = fGenerationCounter->next();
    fPlotLocator = PlotLocator(fPageIndex, fPlotIndex, fGenID);
    fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
    fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();

    // zero out the plot
    if (fData) {
        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
    }

    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

///////////////////////////////////////////////////////////////////////////////

GrDrawOpAtlas::GrDrawOpAtlas(GrProxyProvider* proxyProvider, const GrBackendFormat& format,
                             GrColorType colorType, int width, int height,
                             int plotWidth, int plotHeight, GenerationCounter* generationCounter,
#ifdef SK_ENABLE_SMALL_PAGE
                             AllowMultitexturing allowMultitexturing, int atlasPageNum)
#else
                             AllowMultitexturing allowMultitexturing)
#endif
        : fFormat(format)
        , fColorType(colorType)
        , fTextureWidth(width)
        , fTextureHeight(height)
        , fPlotWidth(plotWidth)
        , fPlotHeight(plotHeight)
        , fGenerationCounter(generationCounter)
        , fAtlasGeneration(fGenerationCounter->next())
        , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
#ifdef SK_ENABLE_SMALL_PAGE
        , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing
                            ? ((atlasPageNum > 16) ? 16 : atlasPageNum) : 1)
#else
        , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? kMaxMultitexturePages : 1)
#endif
        , fNumActivePages(0) {
    int numPlotsX = width/plotWidth;
    int numPlotsY = height/plotHeight;
    SkASSERT(numPlotsX * numPlotsY <= GrDrawOpAtlas::kMaxPlots);
    SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
    SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);

    fNumPlots = numPlotsX * numPlotsY;

    this->createPages(proxyProvider, generationCounter);
    SkDebugf("Texture[Width:%{public}d, Height:%{public}d, MaxPage:%{public}d], Plot[Width:%{public}d, Height:%{public}d].",
             fTextureWidth, fTextureHeight, fMaxPages, fPlotWidth, fPlotHeight);
}

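// Notifies all registered eviction callbacks that this plot's contents are being evicted and
// advances the atlas generation.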
inline void GrDrawOpAtlas::processEviction(PlotLocator plotLocator) {
    for (EvictionCallback* evictor : fEvictionCallbacks) {
        evictor->evict(plotLocator);
    }

    fAtlasGeneration = fGenerationCounter->next();
}

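// Moves the plot to the head of its page's MRU list and, if the plot's last upload has already
// been flushed, schedules a fresh ASAP upload of its data; otherwise the still-pending upload
// will pick up the new contents. Finally records the plot's locator in the AtlasLocator.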
inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target,
                                      AtlasLocator* atlasLocator, Plot* plot) {
    int pageIdx = plot->pageIndex();
    this->makeMRU(plot, pageIdx);

    // If our most recent upload has already occurred then we have to insert a new
    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
    // This new update will piggyback on that previously scheduled update.
    if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
        // With C++14 we could move the sk_sp into the lambda to only ref once.
        sk_sp<Plot> plotsp(SkRef(plot));

        GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
        SkASSERT(proxy && proxy->isInstantiated()); // This is occurring at flush time

        GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
                [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                    plotsp->uploadToTexture(writePixels, proxy);
                });
        plot->setLastUploadToken(lastUploadToken);
    }
    atlasLocator->updatePlotLocator(plot->plotLocator());
    SkDEBUGCODE(this->validate(*atlasLocator);)
    return true;
}

bool GrDrawOpAtlas::uploadToPage(unsigned int pageIdx, GrDeferredUploadTarget* target, int width,
                                 int height, const void* image, AtlasLocator* atlasLocator) {
    SkASSERT(fViews[pageIdx].proxy() && fViews[pageIdx].proxy()->isInstantiated());

    // look through all allocated plots for one we can share, in Most Recently Refed order
    PlotList::Iter plotIter;
    plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);

    for (Plot* plot = plotIter.get(); plot; plot = plotIter.next()) {
        SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
                 plot->bpp());

        if (plot->addSubImage(width, height, image, atlasLocator)) {
            return this->updatePlot(target, atlasLocator, plot);
        }
    }

    return false;
}

// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kPlotRecentlyUsedCount = 32;
static constexpr auto kAtlasRecentlyUsedCount = 128;

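// Attempts to add an image to the atlas. The strategy, in order:
//   1. Try to upload to an already-active page, preferring earlier pages so later ones drain.
//   2. If every page is active, evict the least-recently-used plot on some page whose last use
//      has already flushed and reuse it; otherwise activate a new page and upload there.
//   3. Failing that, find a plot not referenced by the draw currently being prepared and replace
//      it with an inline upload; if every candidate plot is in use by that draw, return
//      kTryAgain so the caller can enqueue its pending draw and retry.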
GrDrawOpAtlas::ErrorCode GrDrawOpAtlas::addToAtlas(GrResourceProvider* resourceProvider,
                                                   GrDeferredUploadTarget* target,
                                                   int width, int height, const void* image,
                                                   AtlasLocator* atlasLocator) {
    if (width > fPlotWidth || height > fPlotHeight) {
        return ErrorCode::kError;
    }

    // Look through each page to see if we can upload without having to flush
    // We prioritize this upload to the first pages, not the most recently used, to make it easier
    // to remove unused pages in reverse page order.
    for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
        if (this->uploadToPage(pageIdx, target, width, height, image, atlasLocator)) {
            return ErrorCode::kSucceeded;
        }
    }

    // If the above fails, then see if the least recently used plot per page has already been
    // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise.
    // We wait until we've grown to the full number of pages to begin evicting already flushed
    // plots so that we can maximize the opportunity for reuse.
    // As before we prioritize this upload to the first pages, not the most recently used.
    if (fNumActivePages == this->maxPages()) {
        for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
            Plot* plot = fPages[pageIdx].fPlotList.tail();
            SkASSERT(plot);
            if (plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) {
                this->processEvictionAndResetRects(plot);
                SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
                         plot->bpp());
                SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, atlasLocator);
                SkASSERT(verify);
                if (!this->updatePlot(target, atlasLocator, plot)) {
                    return ErrorCode::kError;
                }
                return ErrorCode::kSucceeded;
            }
        }
    } else {
        // If we haven't activated all the available pages, try to create a new one and add to it
        if (!this->activateNewPage(resourceProvider)) {
            return ErrorCode::kError;
        }

        if (this->uploadToPage(fNumActivePages-1, target, width, height, image, atlasLocator)) {
            return ErrorCode::kSucceeded;
        } else {
            // If we fail to upload to a newly activated page then something has gone terribly
            // wrong - return an error
            return ErrorCode::kError;
        }
    }

    if (!fNumActivePages) {
        return ErrorCode::kError;
    }

    // Try to find a plot that we can perform an inline upload to.
    // We prioritize this upload in reverse order of pages to counterbalance the order above.
    Plot* plot = nullptr;
    for (int pageIdx = ((int)fNumActivePages)-1; pageIdx >= 0; --pageIdx) {
        Plot* currentPlot = fPages[pageIdx].fPlotList.tail();
        if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) {
            plot = currentPlot;
            break;
        }
    }

    // If we can't find a plot that is not used in a draw currently being prepared by an op, then
    // we have to fail. This gives the op a chance to enqueue the draw, and call back into this
    // function. When that draw is enqueued, the draw token advances, and the subsequent call will
    // continue past this branch and prepare an inline upload that will occur after the enqueued
    // draw which references the plot's pre-upload content.
    if (!plot) {
        return ErrorCode::kTryAgain;
    }

    this->processEviction(plot->plotLocator());
    int pageIdx = plot->pageIndex();
    fPages[pageIdx].fPlotList.remove(plot);
    sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->plotIndex()];
    newPlot.reset(plot->clone());

    fPages[pageIdx].fPlotList.addToHead(newPlot.get());
    SkASSERT(GrBackendFormatBytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
             newPlot->bpp());
    SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, atlasLocator);
    SkASSERT(verify);

    // Note that this plot will be uploaded inline with the draws whereas the
    // one it displaced most likely was uploaded ASAP.
    // With C++14 we could move the sk_sp into the lambda to only ref once.
    sk_sp<Plot> plotsp(SkRef(newPlot.get()));

    GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
    SkASSERT(proxy && proxy->isInstantiated());

    GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
            [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                plotsp->uploadToTexture(writePixels, proxy);
            });
    newPlot->setLastUploadToken(lastUploadToken);

    atlasLocator->updatePlotLocator(newPlot->plotLocator());
    SkDEBUGCODE(this->validate(*atlasLocator);)

    return ErrorCode::kSucceeded;
}

#ifdef SK_ENABLE_SMALL_PAGE
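// A more aggressive compaction path used with SK_ENABLE_SMALL_PAGE: unused plots that still hold
// uploaded data are evicted during the scan, and trailing pages that saw no use since the
// previous flush are deactivated.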
void GrDrawOpAtlas::compactRadicals(GrDeferredUploadToken startTokenForNextFlush) {
    if (fNumActivePages <= 1) {
        return;
    }
    PlotList::Iter plotIter;
    unsigned short usedAtlasLastFlush = 0;
    for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
        plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        while (Plot* plot = plotIter.get()) {
            if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                usedAtlasLastFlush |= (1 << pageIndex);
                break;
            } else if (plot->lastUploadToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
                this->processEvictionAndResetRects(plot);
            }
            plotIter.next();
        }
    }
    int lastPageIndex = fNumActivePages - 1;
    while (lastPageIndex > 0 && !(usedAtlasLastFlush & (1 << lastPageIndex))) {
        deactivateLastPage();
        lastPageIndex--;
    }
}
#endif

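// Called at the end of each flush. Updates every plot's flushes-since-last-used count and, when
// the atlas was used this flush or has been idle for a long time, tries to migrate the last
// page's remaining users into earlier pages so the last page can eventually be deactivated.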
void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
#ifdef SK_ENABLE_SMALL_PAGE
    int threshold;
    if (this->fUseRadicalsCompact) {
        threshold = 1;
        compactRadicals(startTokenForNextFlush);
    } else {
        threshold = kPlotRecentlyUsedCount;
    }
#else
    int threshold = kPlotRecentlyUsedCount;
#endif
    if (fNumActivePages < 1) {
        fPrevFlushToken = startTokenForNextFlush;
        return;
    }

    // For all plots, reset number of flushes since used if used this frame.
    PlotList::Iter plotIter;
    bool atlasUsedThisFlush = false;
    for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
        plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        while (Plot* plot = plotIter.get()) {
            // Reset number of flushes since used
            if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->resetFlushesSinceLastUsed();
                atlasUsedThisFlush = true;
            }

            plotIter.next();
        }
    }

    if (atlasUsedThisFlush) {
        fFlushesSinceLastUse = 0;
    } else {
        ++fFlushesSinceLastUse;
    }

    // We only try to compact if the atlas was used in the recently completed flush or
    // hasn't been used in a long time.
    // This is to handle the case where a lot of text or path rendering has occurred but then just
    // a blinking cursor is drawn.
    if (atlasUsedThisFlush || fFlushesSinceLastUse > kAtlasRecentlyUsedCount) {
        SkTArray<Plot*> availablePlots;
        uint32_t lastPageIndex = fNumActivePages - 1;

        // For all plots in pages other than the last one, update the number of flushes since
        // they were last used, and check to see if there are any the last page can safely
        // upload to.
        for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("page %d: ", pageIndex);
            }
#endif
            plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // Update number of flushes since plot was last used
                // We only increment the 'sinceLastUsed' count for flushes where the atlas was used
                // to avoid deleting everything when we return to text drawing in the blinking
                // cursor case
                if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                    plot->incFlushesSinceLastUsed();
                }

#ifdef DUMP_ATLAS_DATA
                if (gDumpAtlasData) {
                    SkDebugf("%d ", plot->flushesSinceLastUsed());
                }
#endif
                // Count plots we can potentially upload to in all pages except the last one
                // (the potential compactee).
                if (plot->flushesSinceLastUsed() > threshold) {
                    availablePlots.push_back() = plot;
                }

                plotIter.next();
            }
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("\n");
            }
#endif
        }

        // Count recently used plots in the last page and evict any that are no longer in use.
        // Since we prioritize uploading to the first pages, this will eventually
        // clear out usage of this page unless we have a large need.
        plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        unsigned int usedPlots = 0;
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("page %d: ", lastPageIndex);
        }
#endif
        while (Plot* plot = plotIter.get()) {
            // Update number of flushes since plot was last used
            if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->incFlushesSinceLastUsed();
            }

#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("%d ", plot->flushesSinceLastUsed());
            }
#endif
            // If this plot was used recently
            if (plot->flushesSinceLastUsed() <= threshold) {
                usedPlots++;
            } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
                // otherwise if aged out just evict it.
                this->processEvictionAndResetRects(plot);
            }
            plotIter.next();
        }
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("\n");
        }
#endif

        // If recently used plots in the last page are using less than a quarter of the page, try
        // to evict them if there's available space in earlier pages. Since we prioritize uploading
        // to the first pages, this will eventually clear out usage of this page unless we have a
        // large need.
        if (availablePlots.count() && usedPlots && usedPlots <= fNumPlots / 4) {
            plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // If this plot was used recently
                if (plot->flushesSinceLastUsed() <= threshold) {
                    // See if there's room in an earlier page and if so evict.
                    // We need to be somewhat harsh here so that a handful of plots that are
                    // consistently in use don't end up locking the page in memory.
                    if (availablePlots.count() > 0) {
                        this->processEvictionAndResetRects(plot);
                        this->processEvictionAndResetRects(availablePlots.back());
                        availablePlots.pop_back();
                        --usedPlots;
                    }
                    if (!usedPlots || !availablePlots.count()) {
                        break;
                    }
                }
                plotIter.next();
            }
        }

        // If none of the plots in the last page have been used recently, delete it.
        if (!usedPlots) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("delete %d\n", fNumActivePages-1);
            }
#endif
            this->deactivateLastPage();
            fFlushesSinceLastUse = 0;
        }
    }

    fPrevFlushToken = startTokenForNextFlush;
}

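// Creates the deferred backing proxy and the plot grid for each potential page. Plots are created
// with x and y decreasing as plotIndex increases, matching the index formula that validate()
// checks against an AtlasLocator's top-left corner.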
bool GrDrawOpAtlas::createPages(
        GrProxyProvider* proxyProvider, GenerationCounter* generationCounter) {
    SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight));

    SkISize dims = {fTextureWidth, fTextureHeight};

    int numPlotsX = fTextureWidth/fPlotWidth;
    int numPlotsY = fTextureHeight/fPlotHeight;

    for (uint32_t i = 0; i < this->maxPages(); ++i) {
        GrSwizzle swizzle = proxyProvider->caps()->getReadSwizzle(fFormat, fColorType);
        if (GrColorTypeIsAlphaOnly(fColorType)) {
            swizzle = GrSwizzle::Concat(swizzle, GrSwizzle("aaaa"));
        }
        sk_sp<GrSurfaceProxy> proxy = proxyProvider->createProxy(
                fFormat, dims, GrRenderable::kNo, 1, GrMipmapped::kNo, SkBackingFit::kExact,
                SkBudgeted::kYes, GrProtected::kNo, GrInternalSurfaceFlags::kNone,
                GrSurfaceProxy::UseAllocator::kNo);
        if (!proxy) {
            return false;
        }
        fViews[i] = GrSurfaceProxyView(std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle);

        // set up allocated plots
        fPages[i].fPlotArray = std::make_unique<sk_sp<Plot>[]>(numPlotsX * numPlotsY);

        sk_sp<Plot>* currPlot = fPages[i].fPlotArray.get();
        for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
            for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
                uint32_t plotIndex = r * numPlotsX + c;
                currPlot->reset(new Plot(
                        i, plotIndex, generationCounter, x, y, fPlotWidth, fPlotHeight,
                        fColorType));

                // build LRU list
                fPages[i].fPlotList.addToHead(currPlot->get());
                ++currPlot;
            }
        }
    }

    return true;
}

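// Instantiates the backing texture for the next inactive page and makes it available for uploads.
// This only happens at flush time, which is why instantiate() above can simply assert that all
// active pages are already instantiated.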
bool GrDrawOpAtlas::activateNewPage(GrResourceProvider* resourceProvider) {
    SkASSERT(fNumActivePages < this->maxPages());

    if (!fViews[fNumActivePages].proxy()->instantiate(resourceProvider)) {
        return false;
    }

#ifdef DUMP_ATLAS_DATA
    if (gDumpAtlasData) {
        SkDebugf("activated page#: %d\n", fNumActivePages);
    }
#endif

    ++fNumActivePages;
    return true;
}

inline void GrDrawOpAtlas::deactivateLastPage() {
    SkASSERT(fNumActivePages);

    uint32_t lastPageIndex = fNumActivePages - 1;

    int numPlotsX = fTextureWidth/fPlotWidth;
    int numPlotsY = fTextureHeight/fPlotHeight;

    fPages[lastPageIndex].fPlotList.reset();
    for (int r = 0; r < numPlotsY; ++r) {
        for (int c = 0; c < numPlotsX; ++c) {
            uint32_t plotIndex = r * numPlotsX + c;

            Plot* currPlot = fPages[lastPageIndex].fPlotArray[plotIndex].get();
            currPlot->resetRects();
            currPlot->resetFlushesSinceLastUsed();

            // rebuild the LRU list
            SkDEBUGCODE(currPlot->fPrev = currPlot->fNext = nullptr);
            SkDEBUGCODE(currPlot->fList = nullptr);
            fPages[lastPageIndex].fPlotList.addToHead(currPlot);
        }
    }

    // remove ref to the backing texture
    fViews[lastPageIndex].proxy()->deinstantiate();
    --fNumActivePages;
}

GrDrawOpAtlasConfig::GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes) {
    static const SkISize kARGBDimensions[] = {
        {256, 256},   // maxBytes < 2^19
        {512, 256},   // 2^19 <= maxBytes < 2^20
        {512, 512},   // 2^20 <= maxBytes < 2^21
        {1024, 512},  // 2^21 <= maxBytes < 2^22
        {1024, 1024}, // 2^22 <= maxBytes < 2^23
        {2048, 1024}, // 2^23 <= maxBytes
    };

    // Index 0 corresponds to maxBytes of 2^18, so start by dividing it by that
    maxBytes >>= 18;
    // Take the floor of the log to get the index
    int index = maxBytes > 0
                ? SkTPin<int>(SkPrevLog2(maxBytes), 0, SK_ARRAY_COUNT(kARGBDimensions) - 1)
                : 0;

    SkASSERT(kARGBDimensions[index].width() <= kMaxAtlasDim);
    SkASSERT(kARGBDimensions[index].height() <= kMaxAtlasDim);
    fARGBDimensions.set(std::min<int>(kARGBDimensions[index].width(), maxTextureSize),
                        std::min<int>(kARGBDimensions[index].height(), maxTextureSize));
    fMaxTextureSize = std::min<int>(maxTextureSize, kMaxAtlasDim);
}

#ifdef SK_ENABLE_SMALL_PAGE
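// Shrinks the ARGB page dimensions to 512x512 and returns the page count to use in their place,
// derived from the byte budget of the previously configured page size.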
int GrDrawOpAtlasConfig::resetAsSmallPage() {
    size_t maxBytes = fARGBDimensions.width() * fARGBDimensions.height() * 4;
    fARGBDimensions.set(512, 512);
    return maxBytes / (fARGBDimensions.width() * fARGBDimensions.height());
}
#endif

SkISize GrDrawOpAtlasConfig::atlasDimensions(GrMaskFormat type) const {
    if (kA8_GrMaskFormat == type) {
        // A8 is always 2x the ARGB dimensions, clamped to the max allowed texture size
        return { std::min<int>(2 * fARGBDimensions.width(), fMaxTextureSize),
                 std::min<int>(2 * fARGBDimensions.height(), fMaxTextureSize) };
    } else {
        return fARGBDimensions;
    }
}

SkISize GrDrawOpAtlasConfig::plotDimensions(GrMaskFormat type) const {
    if (kA8_GrMaskFormat == type) {
        SkISize atlasDimensions = this->atlasDimensions(type);
        // For A8 we want to grow the plots at larger texture sizes to accept more of the
        // larger SDF glyphs. Since the largest SDF glyph can be 170x170 with padding, this
        // allows us to pack 3 in a 512x256 plot, or 9 in a 512x512 plot.

#ifdef SK_ENABLE_SMALL_PAGE
        // This will give us 512x512 plots for 1024x1024 atlases, 256x256 plots otherwise.
        int plotWidth = atlasDimensions.width() >= 1024 ? 512 : 256;
        int plotHeight = atlasDimensions.height() >= 1024 ? 512 : 256;
#else
        // This will give us 512x256 plots for 2048x1024, 512x512 plots for 2048x2048,
        // and 256x256 plots otherwise.
        int plotWidth = atlasDimensions.width() >= 2048 ? 512 : 256;
        int plotHeight = atlasDimensions.height() >= 2048 ? 512 : 256;
#endif

        return { plotWidth, plotHeight };
    } else {
        // ARGB and LCD always use 256x256 plots -- this has been shown to be faster
        return { 256, 256 };
    }
}