/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkTextureCompressor_Blitter_DEFINED
#define SkTextureCompressor_Blitter_DEFINED

#include "SkTypes.h"
#include "SkBlitter.h"

namespace SkTextureCompressor {

// Ostensibly, SkBlitter::BlitRect is supposed to set a rect of pixels to full
// alpha. This becomes problematic when using compressed texture blitters, since
// the rect rarely falls along block boundaries. The proper way to handle this is
// to update the compressed encoding of a block by resetting the proper parameters
// (and even recompressing the block) where a rect falls in between block boundaries.
// PEDANTIC_BLIT_RECT attempts to do this by requiring the struct passed to
// SkTCompressedAlphaBlitter to implement an UpdateBlock function call.
//
// However, BlitRect is used almost exclusively to bracket inverse
// fills for paths. In other words, the top few rows and bottom few rows of a path
// that's getting inverse filled are filled using blitRect. The rest are filled using
// the standard blitAntiH. As a result, we can just call blitAntiH with a faux RLE
// of full alpha values, and then check in our flush() call that we don't run off the
// edge of the buffer. This is why we do not need this flag to be turned on.
//
// NOTE: This code is unfinished, but is intended as a starting point if and when
// bugs are introduced from the existing code.
#define PEDANTIC_BLIT_RECT 0

// This class implements a blitter that blits directly into a buffer that will
// be used as a compressed alpha texture. We compute this buffer by
// buffering scan lines and then outputting them all at once. The number of
// scan lines buffered is controlled by the BlockDim template parameter.
//
// The CompressorType is a struct with a bunch of static methods that provides
// the specialized compression functionality of the blitter. A complete CompressorType
// will implement the following static functions:
//
// struct CompressorType {
//     // The function used to compress an A8 block. The layout of the
//     // block is expected to be in column-major order.
//     static void CompressA8Vertical(uint8_t* dst, const uint8_t block[]);
//
//     // The function used to compress an A8 block. The layout of the
//     // block is expected to be in row-major order.
//     static void CompressA8Horizontal(uint8_t* dst, const uint8_t* src, int srcRowBytes);
//
#if PEDANTIC_BLIT_RECT
//     // The function used to update an already compressed block. This will
//     // most likely be implementation dependent. The mask variable will have
//     // 0xFF in positions where the block should be updated and 0 in positions
//     // where it shouldn't. src contains an uncompressed buffer of pixels.
//     static void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
//                             const uint8_t* mask);
#endif
// };
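//
// As a purely hypothetical illustration (this compressor does not ship with
// Skia), a pass-through "compressor" for 4x4 blocks that stores each block
// verbatim in row-major order (so BlockDim == 4 and EncodedBlockSize == 16)
// could look like:
//
// struct PassthroughA8 {
//     static void CompressA8Vertical(uint8_t* dst, const uint8_t block[]) {
//         // block[] is column-major, so transpose while copying.
//         for (int r = 0; r < 4; ++r) {
//             for (int c = 0; c < 4; ++c) {
//                 dst[r*4 + c] = block[c*4 + r];
//             }
//         }
//     }
//     static void CompressA8Horizontal(uint8_t* dst, const uint8_t* src,
//                                      int srcRowBytes) {
//         // src is row-major with srcRowBytes bytes between rows.
//         for (int r = 0; r < 4; ++r) {
//             memcpy(dst + r*4, src + r*srcRowBytes, 4);
//         }
//     }
// };
//
// Such a struct would be instantiated as SkTCompressedAlphaBlitter<4, 16, PassthroughA8>.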
template<int BlockDim, int EncodedBlockSize, typename CompressorType>
class SkTCompressedAlphaBlitter : public SkBlitter {
public:
    SkTCompressedAlphaBlitter(int width, int height, void *compressedBuffer)
        // 0x7FFE is one less than the largest positive 16-bit int. We use it for
        // debugging to make sure that we're properly setting the nextX distance
        // in flushRuns().
#ifdef SK_DEBUG
        : fCalledOnceWithNonzeroY(false)
        , fBlitMaskCalled(false),
#else
        :
#endif
        kLongestRun(0x7FFE), kZeroAlpha(0)
        , fNextRun(0)
        , fWidth(width)
        , fHeight(height)
        , fBuffer(compressedBuffer)
        {
            SkASSERT((width % BlockDim) == 0);
            SkASSERT((height % BlockDim) == 0);
        }

    virtual ~SkTCompressedAlphaBlitter() { this->flushRuns(); }

    // Blit a horizontal run of one or more pixels.
    void blitH(int x, int y, int width) override {
        // This function is intended to be called when blitting to a standard RGB
        // buffer, so we should never encounter it here. However, if some code
        // path does end up here, then this needs to be investigated.
        SkFAIL("Not implemented!");
    }

    // Blit a horizontal run of antialiased pixels; runs[] is a *sparse*
    // zero-terminated run-length encoding of spans of constant alpha values.
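    //
    // As a hypothetical example of that encoding (values chosen for
    // illustration): a row that begins with 3 transparent pixels, then 5
    // pixels at alpha 0x80, then 2 opaque pixels would arrive as
    // runs[0] == 3 with antialias[0] == 0x00, runs[3] == 5 with
    // antialias[3] == 0x80, runs[8] == 2 with antialias[8] == 0xFF, and
    // runs[10] == 0 to terminate; each run's length tells us where the
    // next (run, alpha) pair lives.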
    void blitAntiH(int x, int y,
                   const SkAlpha antialias[],
                   const int16_t runs[]) override {
        SkASSERT(0 == x);

        // Make sure that the new row to blit is either the first
        // row that we're blitting, or it's exactly the next scan row
        // after the last row that we blitted. This is to ensure that when
        // we go to flush the runs, they are all from the same block of
        // rows.
        if (fNextRun > 0 &&
            ((x != fBufferedRuns[fNextRun-1].fX) ||
             (y-1 != fBufferedRuns[fNextRun-1].fY))) {
            this->flushRuns();
        }

        // Align the rows to a block boundary. If we receive rows that
        // are not on a block boundary, then fill in the preceding runs
        // with zeros. We do this by producing a single RLE that says
        // that we have 0x7FFE pixels of zero (0x7FFE = 32766).
        const int row = BlockDim * (y / BlockDim);
        while ((row + fNextRun) < y) {
            fBufferedRuns[fNextRun].fAlphas = &kZeroAlpha;
            fBufferedRuns[fNextRun].fRuns = &kLongestRun;
            fBufferedRuns[fNextRun].fX = 0;
            fBufferedRuns[fNextRun].fY = row + fNextRun;
            ++fNextRun;
        }

        // Make sure that our assumptions aren't violated...
        SkASSERT(fNextRun == (y % BlockDim));
        SkASSERT(fNextRun == 0 || fBufferedRuns[fNextRun - 1].fY < y);

        // Set the values of the next run
        fBufferedRuns[fNextRun].fAlphas = antialias;
        fBufferedRuns[fNextRun].fRuns = runs;
        fBufferedRuns[fNextRun].fX = x;
        fBufferedRuns[fNextRun].fY = y;

        // If we've buffered a full block of scanlines that don't violate our
        // assumptions, then it's time to flush them...
        if (BlockDim == ++fNextRun) {
            this->flushRuns();
        }
    }

    // Blit a vertical run of pixels with a constant alpha value.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        // This function is currently not implemented. It is not explicitly
        // required by the contract, but if at some time a code path runs into
        // this function (which is entirely possible), it needs to be implemented.
        //
        // TODO (krajcevski):
        // This function will be most easily implemented in one of two ways:
        // 1. Buffer each vertical column value and then construct a list
        //    of alpha values and output all of the blocks at once. This only
        //    requires a write to the compressed buffer.
        // 2. Replace the indices of each block with the proper indices based
        //    on the alpha value. This requires a read and write of the compressed
        //    buffer, but much less overhead.
        SkFAIL("Not implemented!");
    }

    // Blit a solid rectangle one or more pixels wide. It's assumed that blitRect
    // is called as a way to bracket blitAntiH: above and below the path, the
    // caller just needs a solid rectangle to fill in the mask.
#ifdef SK_DEBUG
    bool fCalledOnceWithNonzeroY;
#endif
    void blitRect(int x, int y, int width, int height) override {

        // Assumptions:
        SkASSERT(0 == x);
        SkASSERT(width <= fWidth);

        // Make sure that we're only ever bracketing calls to blitAntiH.
        SkASSERT((0 == y) || (!fCalledOnceWithNonzeroY && (fCalledOnceWithNonzeroY = true)));

#if !(PEDANTIC_BLIT_RECT)
        for (int i = 0; i < height; ++i) {
            const SkAlpha kFullAlpha = 0xFF;
            this->blitAntiH(x, y+i, &kFullAlpha, &kLongestRun);
        }
#else
        const int startBlockX = (x / BlockDim) * BlockDim;
        const int startBlockY = (y / BlockDim) * BlockDim;

        const int endBlockX = ((x + width) / BlockDim) * BlockDim;
        const int endBlockY = ((y + height) / BlockDim) * BlockDim;

        // If start and end are the same, then we only need to update a single block...
        if (startBlockY == endBlockY && startBlockX == endBlockX) {
            uint8_t mask[BlockDim*BlockDim];
            memset(mask, 0, sizeof(mask));

            const int xoff = x - startBlockX;
            SkASSERT((xoff + width) <= BlockDim);

            const int yoff = y - startBlockY;
            SkASSERT((yoff + height) <= BlockDim);

            for (int j = 0; j < height; ++j) {
                memset(mask + (j + yoff)*BlockDim + xoff, 0xFF, width);
            }

            uint8_t* dst = this->getBlock(startBlockX, startBlockY);
            CompressorType::UpdateBlock(dst, mask, BlockDim, mask);

        // If start and end are the same in the y dimension, then we can freely update an
        // entire row of blocks...
        } else if (startBlockY == endBlockY) {

            this->updateBlockRow(x, y, width, height, startBlockY, startBlockX, endBlockX);

        // Similarly, if the start and end are in the same column, then we can just update
        // an entire column of blocks...
        } else if (startBlockX == endBlockX) {

            this->updateBlockCol(x, y, width, height, startBlockX, startBlockY, endBlockY);

        // Otherwise, the rect spans a non-trivial region of blocks, and we have to construct
        // a kind of 9-patch to update each of the pieces of the rect. The top and bottom
        // rows are updated using updateBlockRow, and the left and right columns are updated
        // using updateBlockCol. Anything in the middle is simply memset to an opaque block
        // encoding.
        } else {

            const int innerStartBlockX = startBlockX + BlockDim;
            const int innerStartBlockY = startBlockY + BlockDim;

            // Blit top row
            const int topRowHeight = innerStartBlockY - y;
            this->updateBlockRow(x, y, width, topRowHeight, startBlockY,
                                 startBlockX, endBlockX);

            // Advance y
            y += topRowHeight;
            height -= topRowHeight;

            // Blit middle
            if (endBlockY > innerStartBlockY) {

                // Update the left column
                this->updateBlockCol(x, y, innerStartBlockX - x, endBlockY, startBlockY,
                                     startBlockX, innerStartBlockX);

                // Update the middle with an opaque encoding...
                uint8_t mask[BlockDim*BlockDim];
                memset(mask, 0xFF, sizeof(mask));

                uint8_t opaqueEncoding[EncodedBlockSize];
                CompressorType::CompressA8Horizontal(opaqueEncoding, mask, BlockDim);

                for (int j = innerStartBlockY; j < endBlockY; j += BlockDim) {
                    uint8_t* opaqueDst = this->getBlock(innerStartBlockX, j);
                    for (int i = innerStartBlockX; i < endBlockX; i += BlockDim) {
                        memcpy(opaqueDst, opaqueEncoding, EncodedBlockSize);
                        opaqueDst += EncodedBlockSize;
                    }
                }

                // If we need to update the right column, do that too
                if (x + width > endBlockX) {
                    this->updateBlockCol(endBlockX, y, x + width - endBlockX, endBlockY,
                                         endBlockX, innerStartBlockY, endBlockY);
                }

                // Advance y
                height = y + height - endBlockY;
                y = endBlockY;
            }

            // If we need to update the last row, then do that, too.
            if (height > 0) {
                this->updateBlockRow(x, y, width, height, endBlockY,
                                     startBlockX, endBlockX);
            }
        }
#endif
    }

    // Blit a rectangle with one alpha-blended column on the left,
    // width (zero or more) opaque pixels, and one alpha-blended column
    // on the right. The result will always be at least two pixels wide.
    void blitAntiRect(int x, int y, int width, int height,
                      SkAlpha leftAlpha, SkAlpha rightAlpha) override {
        // This function is currently not implemented. It is not explicitly
        // required by the contract, but if at some time a code path runs into
        // this function (which is entirely possible), it needs to be implemented.
        //
        // TODO (krajcevski):
        // This function will be most easily implemented as follows:
        // 1. If width/height are smaller than a block, then update the
        //    indices of the affected blocks.
        // 2. If width/height are larger than a block, then construct a 9-patch
        //    of block encodings that represent the rectangle, and write them
        //    to the compressed buffer as necessary. Whether the blocks are
        //    overwritten with zeros or just have their indices updated is up
        //    for debate.
        SkFAIL("Not implemented!");
    }

    // Blit a pattern of pixels defined by a rectangle-clipped mask. We make an
    // assumption here that if this function gets called, then it will replace all
    // of the compressed texture blocks that it touches. Hence, two separate calls
    // to blitMask that have clips next to one another will cause artifacts. Most
    // of the time, however, this function gets called because constructing the mask
    // was faster than constructing the RLE for blitAntiH, and this function will
    // only be called once.
#ifdef SK_DEBUG
    bool fBlitMaskCalled;
#endif
    void blitMask(const SkMask& mask, const SkIRect& clip) override {

        // Assumptions:
        SkASSERT(!fBlitMaskCalled);
        SkDEBUGCODE(fBlitMaskCalled = true);
        SkASSERT(SkMask::kA8_Format == mask.fFormat);
        SkASSERT(mask.fBounds.contains(clip));

        // Start from the largest block boundary less than the clip boundaries.
        const int startI = BlockDim * (clip.left() / BlockDim);
        const int startJ = BlockDim * (clip.top() / BlockDim);

        for (int j = startJ; j < clip.bottom(); j += BlockDim) {

            // Get the destination for this block row
            uint8_t* dst = this->getBlock(startI, j);
            for (int i = startI; i < clip.right(); i += BlockDim) {

                // At this point, the block should intersect the clip.
                SkASSERT(SkIRect::IntersectsNoEmptyCheck(
                             SkIRect::MakeXYWH(i, j, BlockDim, BlockDim), clip));

                // Do we need to pad it?
                if (i < clip.left() || j < clip.top() ||
                    i + BlockDim > clip.right() || j + BlockDim > clip.bottom()) {

                    uint8_t block[BlockDim*BlockDim];
                    memset(block, 0, sizeof(block));

                    const int startX = SkMax32(i, clip.left());
                    const int startY = SkMax32(j, clip.top());

                    const int endX = SkMin32(i + BlockDim, clip.right());
                    const int endY = SkMin32(j + BlockDim, clip.bottom());

                    for (int y = startY; y < endY; ++y) {
                        const int col = startX - i;
                        const int row = y - j;
                        const int valsWide = endX - startX;
                        SkASSERT(valsWide <= BlockDim);
                        SkASSERT(0 <= col && col < BlockDim);
                        SkASSERT(0 <= row && row < BlockDim);
                        memcpy(block + row*BlockDim + col,
                               mask.getAddr8(startX, j + row), valsWide);
                    }

                    CompressorType::CompressA8Horizontal(dst, block, BlockDim);
                } else {
                    // Otherwise, just compress it.
                    uint8_t* const src = mask.getAddr8(i, j);
                    const uint32_t rb = mask.fRowBytes;
                    CompressorType::CompressA8Horizontal(dst, src, rb);
                }

                dst += EncodedBlockSize;
            }
        }
    }

    // If the blitter just sets a single value for each pixel, return the
    // bitmap it draws into, and assign value. If not, return nullptr and ignore
    // the value parameter.
    const SkPixmap* justAnOpaqueColor(uint32_t* value) override {
        return nullptr;
    }

    /**
     * Compressed texture blitters only really work correctly if they get
     * BlockDim rows at a time. That being said, this blitter tries its best
     * to preserve semantics if blitAntiH doesn't get called in too many
     * weird ways...
     */
    int requestRowsPreserved() const override { return BlockDim; }

private:
    static const int kPixelsPerBlock = BlockDim * BlockDim;

    // The longest possible run of pixels that this blitter will receive.
    // This is initialized in the constructor to 0x7FFE, which is one less
    // than the largest positive 16-bit integer. We make sure that it's one
    // less for debugging purposes. We also don't make this variable static
    // in order to make sure that we can construct a valid pointer to it.
    const int16_t kLongestRun;

    // Usually used in conjunction with kLongestRun. This is initialized to
    // zero.
    const SkAlpha kZeroAlpha;

    // This is the information that we buffer whenever we're asked to blit
    // a row with this blitter.
    struct BufferedRun {
        const SkAlpha* fAlphas;
        const int16_t* fRuns;
        int fX, fY;
    } fBufferedRuns[BlockDim];

    // The next row [0, BlockDim) that we need to blit.
    int fNextRun;

    // The width and height of the image that we're blitting
    const int fWidth;
    const int fHeight;

    // The compressed buffer that we're blitting into. It is assumed that the buffer
    // is large enough to store a compressed image of size fWidth*fHeight.
    void* const fBuffer;

    // Various utility functions
    int blocksWide() const { return fWidth / BlockDim; }
    int blocksTall() const { return fHeight / BlockDim; }
    int totalBlocks() const { return (fWidth * fHeight) / kPixelsPerBlock; }

    // Returns the block index for the block containing pixel (x, y). Block
    // indices start at zero and proceed in raster order.
    int getBlockOffset(int x, int y) const {
        SkASSERT(x < fWidth);
        SkASSERT(y < fHeight);
        const int blockCol = x / BlockDim;
        const int blockRow = y / BlockDim;
        return blockRow * this->blocksWide() + blockCol;
    }

    // Returns a pointer to the block containing pixel (x, y)
    uint8_t* getBlock(int x, int y) const {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(fBuffer);
        return ptr + EncodedBlockSize*this->getBlockOffset(x, y);
    }

    // Updates the block whose columns are stored in block. curAlphai is expected
    // to store the alpha values that will be placed within each of the columns in
    // the range [col, col+colsLeft).
    typedef uint32_t Column[BlockDim/4];
    typedef uint32_t Block[BlockDim][BlockDim/4];
    inline void updateBlockColumns(Block block, const int col,
                                   const int colsLeft, const Column curAlphai) {
        SkASSERT(block);
        SkASSERT(col + colsLeft <= BlockDim);

        for (int i = col; i < (col + colsLeft); ++i) {
            memcpy(block[i], curAlphai, sizeof(Column));
        }
    }

    // The following function writes the buffered runs to compressed blocks.
    // If fNextRun < BlockDim, then we fill the runs that we haven't buffered with
    // the constant zero buffer.
    void flushRuns() {
        // If we don't have any runs, then just return.
        if (0 == fNextRun) {
            return;
        }

#ifndef NDEBUG
        // Make sure that if we have any runs, they all match
        for (int i = 1; i < fNextRun; ++i) {
            SkASSERT(fBufferedRuns[i].fY == fBufferedRuns[i-1].fY + 1);
            SkASSERT(fBufferedRuns[i].fX == fBufferedRuns[i-1].fX);
        }
#endif

        // If we don't have as many runs as we have rows, fill in the remaining
        // runs with constant zeros.
        for (int i = fNextRun; i < BlockDim; ++i) {
            fBufferedRuns[i].fY = fBufferedRuns[0].fY + i;
            fBufferedRuns[i].fX = fBufferedRuns[0].fX;
            fBufferedRuns[i].fAlphas = &kZeroAlpha;
            fBufferedRuns[i].fRuns = &kLongestRun;
        }

        // Make sure that our assumptions aren't violated.
        SkASSERT(fNextRun > 0 && fNextRun <= BlockDim);
        SkASSERT((fBufferedRuns[0].fY % BlockDim) == 0);

        // The following logic walks BlockDim rows at a time and outputs compressed
        // blocks to the buffer passed into the constructor.
        // We do the following:
        //
        //      c1 c2 c3 c4
        // -----------------------------------------------------------------------
        // ... |  |  |  |  |  ----> fBufferedRuns[0]
        // -----------------------------------------------------------------------
        // ... |  |  |  |  |  ----> fBufferedRuns[1]
        // -----------------------------------------------------------------------
        // ... |  |  |  |  |  ----> fBufferedRuns[2]
        // -----------------------------------------------------------------------
        // ... |  |  |  |  |  ----> fBufferedRuns[3]
        // -----------------------------------------------------------------------
        //
        // curX -- the macro X value that we've gotten to.
        // c[BlockDim] -- the buffers that represent the columns of the current block
        //                  that we're operating on
        // curAlphaColumn -- buffer containing the column of alpha values from fBufferedRuns.
        // nextX -- for each run, the next point at which we need to update curAlphaColumn
        //          after the value of curX.
        // finalX -- the minimum of all the nextX values.
        //
        // curX advances to finalX outputting any blocks that it passes along
        // the way. Since finalX will not change when we reach the end of a
        // run, the termination criteria will be whenever curX == finalX at the
        // end of a loop.
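        //
        // As a hypothetical worked example with BlockDim == 4: suppose one of
        // the buffered rows encodes 3 pixels of alpha 0x00 followed by 5 pixels
        // of alpha 0xFF (runs[0] == 3, runs[3] == 5). Its nextX starts at 3 and
        // its curAlpha is 0x00; once curX reaches 3, its run pointers advance
        // by 3, curAlpha becomes 0xFF, and nextX becomes 3 + 5 == 8. finalX is
        // always the minimum nextX over the BlockDim buffered rows, so blocks
        // are only compressed once every row's alpha is known up to that column.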

        // Setup:
        Block block;
        sk_bzero(block, sizeof(block));

        Column curAlphaColumn;
        sk_bzero(curAlphaColumn, sizeof(curAlphaColumn));

        SkAlpha *curAlpha = reinterpret_cast<SkAlpha*>(&curAlphaColumn);

        int nextX[BlockDim];
        for (int i = 0; i < BlockDim; ++i) {
            nextX[i] = 0x7FFFFF;
        }

        uint8_t* outPtr = this->getBlock(fBufferedRuns[0].fX, fBufferedRuns[0].fY);

        // Populate the first set of runs and figure out how far we need to
        // advance on the first step
        int curX = 0;
        int finalX = 0xFFFFF;
        for (int i = 0; i < BlockDim; ++i) {
            nextX[i] = *(fBufferedRuns[i].fRuns);
            curAlpha[i] = *(fBufferedRuns[i].fAlphas);

            finalX = SkMin32(nextX[i], finalX);
        }

        // Make sure that we have a valid right-bound X value
        SkASSERT(finalX < 0xFFFFF);

        // If the finalX is the longest run, then just blit until we have
        // width...
        if (kLongestRun == finalX) {
            finalX = fWidth;
        }

        // Run the blitter...
        while (curX != finalX) {
            SkASSERT(finalX >= curX);

            // Do we need to populate the rest of the block?
            if ((finalX - (BlockDim*(curX / BlockDim))) >= BlockDim) {
                const int col = curX % BlockDim;
                const int colsLeft = BlockDim - col;
                SkASSERT(curX + colsLeft <= finalX);

                this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);

                // Write this block
                CompressorType::CompressA8Vertical(outPtr, reinterpret_cast<uint8_t*>(block));
                outPtr += EncodedBlockSize;
                curX += colsLeft;
            }

            // If we can advance even further, then just keep memsetting the block
            if ((finalX - curX) >= BlockDim) {
                SkASSERT((curX % BlockDim) == 0);

                const int col = 0;
                const int colsLeft = BlockDim;

                this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);

                // While we can keep advancing, just keep writing the block.
                uint8_t lastBlock[EncodedBlockSize];
                CompressorType::CompressA8Vertical(lastBlock, reinterpret_cast<uint8_t*>(block));
                while ((finalX - curX) >= BlockDim) {
                    memcpy(outPtr, lastBlock, EncodedBlockSize);
                    outPtr += EncodedBlockSize;
                    curX += BlockDim;
                }
            }

            // If we haven't advanced within the block then do so.
            if (curX < finalX) {
                const int col = curX % BlockDim;
                const int colsLeft = finalX - curX;

                this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);
                curX += colsLeft;
            }

            SkASSERT(curX == finalX);

            // Figure out what the next advancement is...
            if (finalX < fWidth) {
                for (int i = 0; i < BlockDim; ++i) {
                    if (nextX[i] == finalX) {
                        const int16_t run = *(fBufferedRuns[i].fRuns);
                        fBufferedRuns[i].fRuns += run;
                        fBufferedRuns[i].fAlphas += run;
                        curAlpha[i] = *(fBufferedRuns[i].fAlphas);
                        nextX[i] += *(fBufferedRuns[i].fRuns);
                    }
                }

                finalX = 0xFFFFF;
                for (int i = 0; i < BlockDim; ++i) {
                    finalX = SkMin32(nextX[i], finalX);
                }
            } else {
                curX = finalX;
            }
        }

        // If we didn't land on a block boundary, output the block...
        if ((curX % BlockDim) > 0) {
#ifdef SK_DEBUG
            for (int i = 0; i < BlockDim; ++i) {
                SkASSERT(nextX[i] == kLongestRun || nextX[i] == curX);
            }
#endif
            const int col = curX % BlockDim;
            const int colsLeft = BlockDim - col;

            memset(curAlphaColumn, 0, sizeof(curAlphaColumn));
            this->updateBlockColumns(block, col, colsLeft, curAlphaColumn);

            CompressorType::CompressA8Vertical(outPtr, reinterpret_cast<uint8_t*>(block));
        }

        fNextRun = 0;
    }

#if PEDANTIC_BLIT_RECT
    void updateBlockRow(int x, int y, int width, int height,
                        int blockRow, int startBlockX, int endBlockX) {
        if (0 == width || 0 == height || startBlockX == endBlockX) {
            return;
        }

        uint8_t* dst = this->getBlock(startBlockX, BlockDim * (y / BlockDim));

        // One horizontal strip to update
        uint8_t mask[BlockDim*BlockDim];
        memset(mask, 0, sizeof(mask));

        // Update the left cap
        int blockX = startBlockX;
        const int yoff = y - blockRow;
        for (int j = 0; j < height; ++j) {
            const int xoff = x - blockX;
            memset(mask + (j + yoff)*BlockDim + xoff, 0xFF, BlockDim - xoff);
        }
        CompressorType::UpdateBlock(dst, mask, BlockDim, mask);
        dst += EncodedBlockSize;
        blockX += BlockDim;

        // Update the middle
        if (blockX < endBlockX) {
            for (int j = 0; j < height; ++j) {
                memset(mask + (j + yoff)*BlockDim, 0xFF, BlockDim);
            }
            while (blockX < endBlockX) {
                CompressorType::UpdateBlock(dst, mask, BlockDim, mask);
                dst += EncodedBlockSize;
                blockX += BlockDim;
            }
        }

        SkASSERT(endBlockX == blockX);

        // Update the right cap (if we need to)
        if (x + width > endBlockX) {
            memset(mask, 0, sizeof(mask));
            for (int j = 0; j < height; ++j) {
                const int xoff = (x+width-blockX);
                memset(mask + (j+yoff)*BlockDim, 0xFF, xoff);
            }
            CompressorType::UpdateBlock(dst, mask, BlockDim, mask);
        }
    }

    void updateBlockCol(int x, int y, int width, int height,
                        int blockCol, int startBlockY, int endBlockY) {
        if (0 == width || 0 == height || startBlockY == endBlockY) {
            return;
        }

        // One vertical strip to update
        uint8_t mask[BlockDim*BlockDim];
        memset(mask, 0, sizeof(mask));
        const int maskX0 = x - blockCol;
        const int maskWidth = maskX0 + width;
        SkASSERT(maskWidth <= BlockDim);

        // Update the top cap
        int blockY = startBlockY;
        for (int j = (y - blockY); j < BlockDim; ++j) {
            memset(mask + maskX0 + j*BlockDim, 0xFF, maskWidth);
        }
        CompressorType::UpdateBlock(this->getBlock(blockCol, blockY), mask, BlockDim, mask);
        blockY += BlockDim;

        // Update middle
        if (blockY < endBlockY) {
            for (int j = 0; j < BlockDim; ++j) {
                memset(mask + maskX0 + j*BlockDim, 0xFF, maskWidth);
            }
            while (blockY < endBlockY) {
                CompressorType::UpdateBlock(this->getBlock(blockCol, blockY),
                                            mask, BlockDim, mask);
                blockY += BlockDim;
            }
        }

        SkASSERT(endBlockY == blockY);

        // Update bottom
        if (y + height > endBlockY) {
            for (int j = y+height; j < endBlockY + BlockDim; ++j) {
                memset(mask + (j-endBlockY)*BlockDim, 0, BlockDim);
            }
            CompressorType::UpdateBlock(this->getBlock(blockCol, blockY),
                                        mask, BlockDim, mask);
        }
    }
#endif  // PEDANTIC_BLIT_RECT

};
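
// A rough usage sketch (hypothetical; this is not how Skia's texture
// compressor wires the blitter up internally). Assuming a CompressorType
// named MyCompressor with BlockDim == 4 and EncodedBlockSize == 8:
//
//     const int width = 16, height = 16;   // must be multiples of BlockDim
//     uint8_t storage[(width / 4) * (height / 4) * 8];
//     SkTCompressedAlphaBlitter<4, 8, MyCompressor> blitter(width, height, storage);
//     // Hand the blitter rows via blitAntiH (or a whole mask via blitMask);
//     // it buffers BlockDim rows at a time and writes one encoded block per
//     // BlockDim x BlockDim tile into storage. Destroying the blitter flushes
//     // any partially buffered rows.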

}  // namespace SkTextureCompressor

#endif  // SkTextureCompressor_Blitter_DEFINED