/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDataUtils.h"

#include "include/private/SkTPin.h"
#include "include/third_party/skcms/skcms.h"
#include "src/core/SkColorSpaceXformSteps.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkMipmap.h"
#include "src/core/SkRasterPipeline.h"
#include "src/core/SkTLazy.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkUtils.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrColor.h"
#include "src/gpu/GrImageInfo.h"
#include "src/gpu/GrPixmap.h"
#include "src/gpu/GrSwizzle.h"

struct ETC1Block {
    uint32_t fHigh;
    uint32_t fLow;
};

constexpr uint32_t kDiffBit = 0x2; // set -> differential; not-set -> individual

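// Extend a 5-bit value to 8 bits by replicating its top bits into the low bits,
// so 0 maps to 0 and 0x1f maps to 0xff.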
static inline int extend_5To8bits(int b) {
    int c = b & 0x1f;
    return (c << 3) | (c >> 2);
}

static const int kNumETC1ModifierTables = 8;
static const int kNumETC1PixelIndices = 4;

// The index of each row in this table is the ETC1 table codeword
// The index of each column in this table is the ETC1 pixel index value
static const int kETC1ModifierTables[kNumETC1ModifierTables][kNumETC1PixelIndices] = {
    /* 0 */ { 2, 8, -2, -8 },
    /* 1 */ { 5, 17, -5, -17 },
    /* 2 */ { 9, 29, -9, -29 },
    /* 3 */ { 13, 42, -13, -42 },
    /* 4 */ { 18, 60, -18, -60 },
    /* 5 */ { 24, 80, -24, -80 },
    /* 6 */ { 33, 106, -33, -106 },
    /* 7 */ { 47, 183, -47, -183 }
};

// Evaluate one of the entries in 'kETC1ModifierTables' to see how close it can get (r8,g8,b8) to
// the original color (rOrig, gOrig, bOrig).
static int test_table_entry(int rOrig, int gOrig, int bOrig,
                            int r8, int g8, int b8,
                            int table, int offset) {
    SkASSERT(0 <= table && table < 8);
    SkASSERT(0 <= offset && offset < 4);

    r8 = SkTPin<int>(r8 + kETC1ModifierTables[table][offset], 0, 255);
    g8 = SkTPin<int>(g8 + kETC1ModifierTables[table][offset], 0, 255);
    b8 = SkTPin<int>(b8 + kETC1ModifierTables[table][offset], 0, 255);

    return SkTAbs(rOrig - r8) + SkTAbs(gOrig - g8) + SkTAbs(bOrig - b8);
}

// Create an ETC1 compressed block that is filled with 'col'
static void create_etc1_block(SkColor col, ETC1Block* block) {
    uint32_t high = 0;
    uint32_t low = 0;

    int rOrig = SkColorGetR(col);
    int gOrig = SkColorGetG(col);
    int bOrig = SkColorGetB(col);

    int r5 = SkMulDiv255Round(31, rOrig);
    int g5 = SkMulDiv255Round(31, gOrig);
    int b5 = SkMulDiv255Round(31, bOrig);

    int r8 = extend_5To8bits(r5);
    int g8 = extend_5To8bits(g5);
    int b8 = extend_5To8bits(b5);

    // We always encode solid color textures in differential mode (i.e., with a 555 base color) but
    // with zero diffs (i.e., bits 26-24, 18-16 and 10-8 are left 0).
    high |= (r5 << 27) | (g5 << 19) | (b5 << 11) | kDiffBit;

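    // The 555 base color can't represent the original color exactly, so brute-force every
    // (table, pixel index) pair and keep the modifier that brings the expanded base color
    // closest to the original.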
    int bestTableIndex = 0, bestPixelIndex = 0;
    int bestSoFar = 1024;
    for (int tableIndex = 0; tableIndex < kNumETC1ModifierTables; ++tableIndex) {
        for (int pixelIndex = 0; pixelIndex < kNumETC1PixelIndices; ++pixelIndex) {
            int score = test_table_entry(rOrig, gOrig, bOrig, r8, g8, b8,
                                         tableIndex, pixelIndex);

            if (bestSoFar > score) {
                bestSoFar = score;
                bestTableIndex = tableIndex;
                bestPixelIndex = pixelIndex;
            }
        }
    }

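    // Use the winning table for both sub-blocks (the table codewords live in bits 7-5 and 4-2).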
    high |= (bestTableIndex << 5) | (bestTableIndex << 2);

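    // The low word holds the sixteen 2-bit pixel indices: the upper 16 bits are the index MSBs
    // and the lower 16 bits are the LSBs. Replicate the winning index to every pixel.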
    if (bestPixelIndex & 0x1) {
        low |= 0xFFFF;
    }
    if (bestPixelIndex & 0x2) {
        low |= 0xFFFF0000;
    }

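    // ETC1 blocks are stored most-significant byte first, so byte-swap the words assembled above
    // in native (little-endian) order.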
    block->fHigh = SkBSwap32(high);
    block->fLow = SkBSwap32(low);
}

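// Number of 4x4 blocks needed to cover 'size' pixels (i.e., size/4, rounded up)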
static int num_4x4_blocks(int size) {
    return ((size + 3) & ~3) >> 2;
}

static int num_6x6_blocks(int size) {
    // Divide the image size by 6, rounding up
    return (size + 5) / 6;
}

static int num_8x8_blocks(int size) {
    // Divide the image size by 8, rounding up
    return ((size + 7) & ~7) >> 3;
}

static int num_ETC1_blocks(int w, int h) {
    w = num_4x4_blocks(w);
    h = num_4x4_blocks(h);

    return w * h;
}

struct BC1Block {
    uint16_t fColor0;
    uint16_t fColor1;
    uint32_t fIndices;
};

static uint16_t to565(SkColor col) {
    int r5 = SkMulDiv255Round(31, SkColorGetR(col));
    int g6 = SkMulDiv255Round(63, SkColorGetG(col));
    int b5 = SkMulDiv255Round(31, SkColorGetB(col));

    return (r5 << 11) | (g6 << 5) | b5;
}

// Create a BC1 compressed block that has two colors but is initialized to 'col0'
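// In BC1, a block whose fColor0 <= fColor1 (as 565 values) is decoded in three-color mode, where
// index 3 is transparent black in the RGBA variant; fColor0 > fColor1 selects the opaque
// four-color mode. The blocks built here always satisfy fColor0 <= fColor1.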
static void create_BC1_block(SkColor col0, SkColor col1, BC1Block* block) {
    block->fColor0 = to565(col0);
    block->fColor1 = to565(col1);
    SkASSERT(block->fColor0 <= block->fColor1); // we always assume transparent blocks

    if (col0 == SK_ColorTRANSPARENT) {
        // This sets all 16 pixels to just use color3 (under the assumption
        // that this is a kBC1_RGBA8_UNORM texture). Note that in this case
        // fColor0 will be opaque black.
        block->fIndices = 0xFFFFFFFF;
    } else {
        // This sets all 16 pixels to just use 'fColor0'
        block->fIndices = 0;
    }
}

size_t GrNumBlocks(SkImage::CompressionType type, SkISize baseDimensions) {
    switch (type) {
        case SkImage::CompressionType::kNone:
            return baseDimensions.width() * baseDimensions.height();
        case SkImage::CompressionType::kETC2_RGB8_UNORM:
        case SkImage::CompressionType::kBC1_RGB8_UNORM:
        case SkImage::CompressionType::kBC1_RGBA8_UNORM:
        case SkImage::CompressionType::kASTC_RGBA8_4x4: {
            int numBlocksWidth = num_4x4_blocks(baseDimensions.width());
            int numBlocksHeight = num_4x4_blocks(baseDimensions.height());

            return numBlocksWidth * numBlocksHeight;
        }
        case SkImage::CompressionType::kASTC_RGBA8_6x6: {
            int numBlocksWidth = num_6x6_blocks(baseDimensions.width());
            int numBlocksHeight = num_6x6_blocks(baseDimensions.height());

            return numBlocksWidth * numBlocksHeight;
        }
        case SkImage::CompressionType::kASTC_RGBA8_8x8: {
            int numBlocksWidth = num_8x8_blocks(baseDimensions.width());
            int numBlocksHeight = num_8x8_blocks(baseDimensions.height());

            return numBlocksWidth * numBlocksHeight;
        }
    }
    SkUNREACHABLE;
}

size_t GrCompressedRowBytes(SkImage::CompressionType type, int width) {
    switch (type) {
        case SkImage::CompressionType::kNone:
            return 0;
        case SkImage::CompressionType::kETC2_RGB8_UNORM:
        case SkImage::CompressionType::kBC1_RGB8_UNORM:
        case SkImage::CompressionType::kBC1_RGBA8_UNORM: {
            int numBlocksWidth = num_4x4_blocks(width);

            static_assert(sizeof(ETC1Block) == sizeof(BC1Block));
            return numBlocksWidth * sizeof(ETC1Block);
        }
        case SkImage::CompressionType::kASTC_RGBA8_4x4: {
            int numBlocksWidth = num_4x4_blocks(width);

            // Every ASTC block, including 4x4, is 16 bytes
            return numBlocksWidth * 16;
        }
        case SkImage::CompressionType::kASTC_RGBA8_6x6: {
            int numBlocksWidth = num_6x6_blocks(width);

            // Every ASTC block, including 6x6, is 16 bytes
            return numBlocksWidth * 16;
        }
        case SkImage::CompressionType::kASTC_RGBA8_8x8: {
            int numBlocksWidth = num_8x8_blocks(width);

            // Every ASTC block, including 8x8, is 16 bytes
            return numBlocksWidth * 16;
        }
    }
    SkUNREACHABLE;
}

SkISize GrCompressedDimensions(SkImage::CompressionType type, SkISize baseDimensions) {
    switch (type) {
        case SkImage::CompressionType::kNone:
            return baseDimensions;
        case SkImage::CompressionType::kETC2_RGB8_UNORM:
        case SkImage::CompressionType::kBC1_RGB8_UNORM:
        case SkImage::CompressionType::kBC1_RGBA8_UNORM:
        case SkImage::CompressionType::kASTC_RGBA8_4x4: {
            int numBlocksWidth = num_4x4_blocks(baseDimensions.width());
            int numBlocksHeight = num_4x4_blocks(baseDimensions.height());

            // Each ETC2, BC1 and ASTC 4x4 block covers 16 pixels
            return { 4 * numBlocksWidth, 4 * numBlocksHeight };
        }
        case SkImage::CompressionType::kASTC_RGBA8_6x6: {
            int numBlocksWidth = num_6x6_blocks(baseDimensions.width());
            int numBlocksHeight = num_6x6_blocks(baseDimensions.height());

            // Each ASTC 6x6 block has 36 pixels
            return { 6 * numBlocksWidth, 6 * numBlocksHeight };
        }
        case SkImage::CompressionType::kASTC_RGBA8_8x8: {
            int numBlocksWidth = num_8x8_blocks(baseDimensions.width());
            int numBlocksHeight = num_8x8_blocks(baseDimensions.height());

            // Each ASTC 8x8 block has 64 pixels
            return { 8 * numBlocksWidth, 8 * numBlocksHeight };
        }
    }
    SkUNREACHABLE;
}

// Fill in 'dest' with ETC1 blocks derived from 'colorf'
static void fillin_ETC1_with_color(SkISize dimensions, const SkColor4f& colorf, char* dest) {
    SkColor color = colorf.toSkColor();

    ETC1Block block;
    create_etc1_block(color, &block);

    int numBlocks = num_ETC1_blocks(dimensions.width(), dimensions.height());

    for (int i = 0; i < numBlocks; ++i) {
        memcpy(dest, &block, sizeof(ETC1Block));
        dest += sizeof(ETC1Block);
    }
}

// Fill in 'dest' with BC1 blocks derived from 'colorf'
static void fillin_BC1_with_color(SkISize dimensions, const SkColor4f& colorf, char* dest) {
    SkColor color = colorf.toSkColor();

    BC1Block block;
    create_BC1_block(color, color, &block);

    int numBlocks = num_ETC1_blocks(dimensions.width(), dimensions.height());

    for (int i = 0; i < numBlocks; ++i) {
        memcpy(dest, &block, sizeof(BC1Block));
        dest += sizeof(BC1Block);
    }
}

#if GR_TEST_UTILS

// Fill in 'dstPixels' with BC1 blocks derived from the 'pixmap'.
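// Black pixels keep index 0 (fColor0), transparent pixels get index 3 (transparent black in the
// RGBA variant), and all other pixels get index 1 (otherColor).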
void GrTwoColorBC1Compress(const SkPixmap& pixmap, SkColor otherColor, char* dstPixels) {
    BC1Block* dstBlocks = reinterpret_cast<BC1Block*>(dstPixels);
    SkASSERT(pixmap.colorType() == SkColorType::kRGBA_8888_SkColorType);

    BC1Block block;

    // black -> fColor0, otherColor -> fColor1
    create_BC1_block(SK_ColorBLACK, otherColor, &block);

    int numXBlocks = num_4x4_blocks(pixmap.width());
    int numYBlocks = num_4x4_blocks(pixmap.height());

    for (int y = 0; y < numYBlocks; ++y) {
        for (int x = 0; x < numXBlocks; ++x) {
            int shift = 0;
            int offsetX = 4 * x, offsetY = 4 * y;
            block.fIndices = 0; // init all the pixels to color0 (i.e., opaque black)
            for (int i = 0; i < 4; ++i) {
                for (int j = 0; j < 4; ++j, shift += 2) {
                    if (offsetX + j >= pixmap.width() || offsetY + i >= pixmap.height()) {
                        // This can happen for the smallest levels of a mipmap and for
                        // textures whose dimensions are not multiples of 4
                        continue;
                    }

                    SkColor tmp = pixmap.getColor(offsetX + j, offsetY + i);
                    if (tmp == SK_ColorTRANSPARENT) {
                        // For RGBA BC1 images color3 is set to transparent black
                        block.fIndices |= 3 << shift;
                    } else if (tmp != SK_ColorBLACK) {
                        block.fIndices |= 1 << shift; // color1
                    }
                }
            }

            dstBlocks[y*numXBlocks + x] = block;
        }
    }
}

#endif

size_t GrComputeTightCombinedBufferSize(size_t bytesPerPixel, SkISize baseDimensions,
                                        SkTArray<size_t>* individualMipOffsets, int mipLevelCount) {
    SkASSERT(individualMipOffsets && !individualMipOffsets->count());
    SkASSERT(mipLevelCount >= 1);

    individualMipOffsets->push_back(0);

    size_t combinedBufferSize = baseDimensions.width() * bytesPerPixel * baseDimensions.height();
    SkISize levelDimensions = baseDimensions;

    // The Vulkan spec for copying a buffer to an image requires that the alignment must be at
    // least 4 bytes and a multiple of the bytes per pixel of the image config.
    SkASSERT(bytesPerPixel == 1 || bytesPerPixel == 2 || bytesPerPixel == 3 ||
             bytesPerPixel == 4 || bytesPerPixel == 8 || bytesPerPixel == 16);
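    // For bytesPerPixel == 3, 12 is the smallest alignment that is a multiple of both 3 and 4.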
    int desiredAlignment = (bytesPerPixel == 3) ? 12 : (bytesPerPixel > 4 ? bytesPerPixel : 4);

    for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        levelDimensions = {std::max(1, levelDimensions.width() /2),
                           std::max(1, levelDimensions.height()/2)};

        size_t trimmedSize = levelDimensions.area() * bytesPerPixel;
        const size_t alignmentDiff = combinedBufferSize % desiredAlignment;
        if (alignmentDiff != 0) {
            combinedBufferSize += desiredAlignment - alignmentDiff;
        }
        SkASSERT((0 == combinedBufferSize % 4) && (0 == combinedBufferSize % bytesPerPixel));

        individualMipOffsets->push_back(combinedBufferSize);
        combinedBufferSize += trimmedSize;
    }

    SkASSERT(individualMipOffsets->count() == mipLevelCount);
    return combinedBufferSize;
}

void GrFillInCompressedData(SkImage::CompressionType type, SkISize dimensions,
                            GrMipmapped mipMapped, char* dstPixels, const SkColor4f& colorf) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    size_t offset = 0;

    for (int i = 0; i < numMipLevels; ++i) {
        size_t levelSize = SkCompressedDataSize(type, dimensions, nullptr, false);

        if (SkImage::CompressionType::kETC2_RGB8_UNORM == type) {
            fillin_ETC1_with_color(dimensions, colorf, &dstPixels[offset]);
        } else {
            SkASSERT(type == SkImage::CompressionType::kBC1_RGB8_UNORM ||
                     type == SkImage::CompressionType::kBC1_RGBA8_UNORM);
            fillin_BC1_with_color(dimensions, colorf, &dstPixels[offset]);
        }

        offset += levelSize;
        dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
    }
}

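// Selects the SkRasterPipeline load stage for 'ct' and returns the swizzle that puts the loaded
// values into RGBA working order. Also reports whether the color type is normalized and whether
// it is sRGB-encoded.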
static GrSwizzle get_load_and_src_swizzle(GrColorType ct, SkRasterPipeline::StockStage* load,
                                          bool* isNormalized, bool* isSRGB) {
    GrSwizzle swizzle("rgba");
    *isNormalized = true;
    *isSRGB = false;
    switch (ct) {
        case GrColorType::kAlpha_8:          *load = SkRasterPipeline::load_a8;        break;
        case GrColorType::kAlpha_16:         *load = SkRasterPipeline::load_a16;       break;
        case GrColorType::kBGR_565:          *load = SkRasterPipeline::load_565;       break;
        case GrColorType::kABGR_4444:        *load = SkRasterPipeline::load_4444;      break;
        case GrColorType::kARGB_4444:        swizzle = GrSwizzle("bgra");
                                             *load = SkRasterPipeline::load_4444;      break;
        case GrColorType::kBGRA_4444:        swizzle = GrSwizzle("gbar");
                                             *load = SkRasterPipeline::load_4444;      break;
        case GrColorType::kRGBA_8888:        *load = SkRasterPipeline::load_8888;      break;
        case GrColorType::kRG_88:            *load = SkRasterPipeline::load_rg88;      break;
        case GrColorType::kRGBA_1010102:     *load = SkRasterPipeline::load_1010102;   break;
        case GrColorType::kBGRA_1010102:     *load = SkRasterPipeline::load_1010102;
                                             swizzle = GrSwizzle("bgra");
                                             break;
        case GrColorType::kAlpha_F16:        *load = SkRasterPipeline::load_af16;      break;
        case GrColorType::kRGBA_F16_Clamped: *load = SkRasterPipeline::load_f16;       break;
        case GrColorType::kRG_1616:          *load = SkRasterPipeline::load_rg1616;    break;
        case GrColorType::kRGBA_16161616:    *load = SkRasterPipeline::load_16161616;  break;

        case GrColorType::kRGBA_8888_SRGB:   *load = SkRasterPipeline::load_8888;
                                             *isSRGB = true;
                                             break;
        case GrColorType::kRG_F16:           *load = SkRasterPipeline::load_rgf16;
                                             *isNormalized = false;
                                             break;
        case GrColorType::kRGBA_F16:         *load = SkRasterPipeline::load_f16;
                                             *isNormalized = false;
                                             break;
        case GrColorType::kRGBA_F32:         *load = SkRasterPipeline::load_f32;
                                             *isNormalized = false;
                                             break;
        case GrColorType::kAlpha_8xxx:       *load = SkRasterPipeline::load_8888;
                                             swizzle = GrSwizzle("000r");
                                             break;
        case GrColorType::kAlpha_F32xxx:     *load = SkRasterPipeline::load_f32;
                                             swizzle = GrSwizzle("000r");
                                             break;
        case GrColorType::kGray_8xxx:        *load = SkRasterPipeline::load_8888;
                                             swizzle = GrSwizzle("rrr1");
                                             break;
        case GrColorType::kGray_8:           *load = SkRasterPipeline::load_a8;
                                             swizzle = GrSwizzle("aaa1");
                                             break;
        case GrColorType::kGrayAlpha_88:     *load = SkRasterPipeline::load_rg88;
                                             swizzle = GrSwizzle("rrrg");
                                             break;
        case GrColorType::kBGRA_8888:        *load = SkRasterPipeline::load_8888;
                                             swizzle = GrSwizzle("bgra");
                                             break;
        case GrColorType::kRGB_888x:         *load = SkRasterPipeline::load_8888;
                                             swizzle = GrSwizzle("rgb1");
                                             break;

        // These are color types we don't expect to ever have to load.
        case GrColorType::kRGB_888:
        case GrColorType::kR_8:
        case GrColorType::kR_16:
        case GrColorType::kR_F16:
        case GrColorType::kGray_F16:
        case GrColorType::kUnknown:
            SK_ABORT("unexpected CT");
    }
    return swizzle;
}

enum class LumMode {
    kNone,
    kToRGB,
    kToAlpha
};

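// Selects the SkRasterPipeline store stage for 'ct' and returns the swizzle to apply just before
// the store so the pipeline's RGBA values land in the destination's channel order. Also reports
// whether a luminance conversion is needed and whether the destination is normalized and
// sRGB-encoded.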
static GrSwizzle get_dst_swizzle_and_store(GrColorType ct, SkRasterPipeline::StockStage* store,
                                           LumMode* lumMode, bool* isNormalized, bool* isSRGB) {
    GrSwizzle swizzle("rgba");
    *isNormalized = true;
    *isSRGB = false;
    *lumMode = LumMode::kNone;
    switch (ct) {
        case GrColorType::kAlpha_8:          *store = SkRasterPipeline::store_a8;        break;
        case GrColorType::kAlpha_16:         *store = SkRasterPipeline::store_a16;       break;
        case GrColorType::kBGR_565:          *store = SkRasterPipeline::store_565;       break;
        case GrColorType::kABGR_4444:        *store = SkRasterPipeline::store_4444;      break;
        case GrColorType::kARGB_4444:        swizzle = GrSwizzle("bgra");
                                             *store = SkRasterPipeline::store_4444;      break;
        case GrColorType::kBGRA_4444:        swizzle = GrSwizzle("argb");
                                             *store = SkRasterPipeline::store_4444;      break;
        case GrColorType::kRGBA_8888:        *store = SkRasterPipeline::store_8888;      break;
        case GrColorType::kRG_88:            *store = SkRasterPipeline::store_rg88;      break;
        case GrColorType::kRGBA_1010102:     *store = SkRasterPipeline::store_1010102;   break;
        case GrColorType::kBGRA_1010102:     swizzle = GrSwizzle("bgra");
                                             *store = SkRasterPipeline::store_1010102;
                                             break;
        case GrColorType::kRGBA_F16_Clamped: *store = SkRasterPipeline::store_f16;       break;
        case GrColorType::kRG_1616:          *store = SkRasterPipeline::store_rg1616;    break;
        case GrColorType::kRGBA_16161616:    *store = SkRasterPipeline::store_16161616;  break;

        case GrColorType::kRGBA_8888_SRGB:   *store = SkRasterPipeline::store_8888;
                                             *isSRGB = true;
                                             break;
        case GrColorType::kRG_F16:           *store = SkRasterPipeline::store_rgf16;
                                             *isNormalized = false;
                                             break;
        case GrColorType::kAlpha_F16:        *store = SkRasterPipeline::store_af16;
                                             *isNormalized = false;
                                             break;
        case GrColorType::kRGBA_F16:         *store = SkRasterPipeline::store_f16;
                                             *isNormalized = false;
                                             break;
        case GrColorType::kRGBA_F32:         *store = SkRasterPipeline::store_f32;
                                             *isNormalized = false;
                                             break;
        case GrColorType::kAlpha_8xxx:       *store = SkRasterPipeline::store_8888;
                                             swizzle = GrSwizzle("a000");
                                             break;
        case GrColorType::kAlpha_F32xxx:     *store = SkRasterPipeline::store_f32;
                                             swizzle = GrSwizzle("a000");
                                             break;
        case GrColorType::kBGRA_8888:        swizzle = GrSwizzle("bgra");
                                             *store = SkRasterPipeline::store_8888;
                                             break;
        case GrColorType::kRGB_888x:         swizzle = GrSwizzle("rgb1");
                                             *store = SkRasterPipeline::store_8888;
                                             break;
        case GrColorType::kR_8:              swizzle = GrSwizzle("agbr");
                                             *store = SkRasterPipeline::store_a8;
                                             break;
        case GrColorType::kR_16:             swizzle = GrSwizzle("agbr");
                                             *store = SkRasterPipeline::store_a16;
                                             break;
        case GrColorType::kR_F16:            swizzle = GrSwizzle("agbr");
                                             *store = SkRasterPipeline::store_af16;
                                             break;
        case GrColorType::kGray_F16:         *lumMode = LumMode::kToAlpha;
                                             *store = SkRasterPipeline::store_af16;
                                             break;
        case GrColorType::kGray_8:           *lumMode = LumMode::kToAlpha;
                                             *store = SkRasterPipeline::store_a8;
                                             break;
        case GrColorType::kGrayAlpha_88:     *lumMode = LumMode::kToRGB;
                                             swizzle = GrSwizzle("ragb");
                                             *store = SkRasterPipeline::store_rg88;
                                             break;
        case GrColorType::kGray_8xxx:        *lumMode = LumMode::kToRGB;
                                             *store = SkRasterPipeline::store_8888;
                                             swizzle = GrSwizzle("r000");
                                             break;

        // These are color types we don't expect to ever have to store.
        case GrColorType::kRGB_888:  // This is handled specially in GrConvertPixels.
        case GrColorType::kUnknown:
            SK_ABORT("unexpected CT");
    }
    return swizzle;
}

static inline void append_clamp_gamut(SkRasterPipeline* pipeline) {
    // SkRasterPipeline may not know our color type and also doesn't like the caller to directly
    // append clamp_gamut. Fake it out.
    static SkImageInfo fakeII = SkImageInfo::MakeN32Premul(1, 1);
    pipeline->append_gamut_clamp_if_normalized(fakeII);
}

bool GrConvertPixels(const GrPixmap& dst, const GrCPixmap& src, bool flipY) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (src.dimensions().isEmpty() || dst.dimensions().isEmpty()) {
        return false;
    }
    if (src.colorType() == GrColorType::kUnknown || dst.colorType() == GrColorType::kUnknown) {
        return false;
    }
    if (!src.hasPixels() || !dst.hasPixels()) {
        return false;
    }
    if (dst.dimensions() != src.dimensions()) {
        return false;
    }
    if (dst.colorType() == GrColorType::kRGB_888) {
        // SkRasterPipeline doesn't handle writing to RGB_888. So we have it write to RGB_888x and
        // then do another conversion that does the 24bit packing. We could be cleverer and skip
        // the temp pixmap when this is the only conversion, but that case is rare, so keep it
        // simple.
        GrPixmap temp = GrPixmap::Allocate(dst.info().makeColorType(GrColorType::kRGB_888x));
        if (!GrConvertPixels(temp, src, flipY)) {
            return false;
        }
        auto* tRow = reinterpret_cast<const char*>(temp.addr());
        auto* dRow = reinterpret_cast<char*>(dst.addr());
        for (int y = 0; y < dst.height(); ++y, tRow += temp.rowBytes(), dRow += dst.rowBytes()) {
            for (int x = 0; x < dst.width(); ++x) {
                auto t = tRow + x*sizeof(uint32_t);
                auto d = dRow + x*3;
                memcpy(d, t, 3);
            }
        }
        return true;
    } else if (src.colorType() == GrColorType::kRGB_888) {
        // SkRasterPipeline doesn't handle reading from RGB_888. So convert it to RGB_888x and then
        // do a recursive call if there is any remaining conversion.
        GrPixmap temp = GrPixmap::Allocate(src.info().makeColorType(GrColorType::kRGB_888x));
        auto* sRow = reinterpret_cast<const char*>(src.addr());
        auto* tRow = reinterpret_cast<char*>(temp.addr());
        for (int y = 0; y < src.height(); ++y, sRow += src.rowBytes(), tRow += temp.rowBytes()) {
            for (int x = 0; x < src.width(); ++x) {
                auto s = sRow + x*3;
                auto t = tRow + x*sizeof(uint32_t);
                memcpy(t, s, 3);
                t[3] = static_cast<char>(0xFF);
            }
        }
        return GrConvertPixels(dst, temp, flipY);
    }

    size_t srcBpp = src.info().bpp();
    size_t dstBpp = dst.info().bpp();

    // SkRasterPipeline operates on row-pixels not row-bytes.
    SkASSERT(dst.rowBytes() % dstBpp == 0);
    SkASSERT(src.rowBytes() % srcBpp == 0);

    bool premul = src.alphaType() == kUnpremul_SkAlphaType &&
                  dst.alphaType() == kPremul_SkAlphaType;
    bool unpremul = src.alphaType() == kPremul_SkAlphaType &&
                    dst.alphaType() == kUnpremul_SkAlphaType;
    bool alphaOrCSConversion =
            premul || unpremul || !SkColorSpace::Equals(src.colorSpace(), dst.colorSpace());

    if (src.colorType() == dst.colorType() && !alphaOrCSConversion) {
        size_t tightRB = dstBpp * dst.width();
        if (flipY) {
            auto s = static_cast<const char*>(src.addr());
            auto d = SkTAddOffset<char>(dst.addr(), dst.rowBytes()*(dst.height() - 1));
            for (int y = 0; y < dst.height(); ++y, d -= dst.rowBytes(), s += src.rowBytes()) {
                memcpy(d, s, tightRB);
            }
        } else {
            SkRectMemcpy(dst.addr(), dst.rowBytes(),
                         src.addr(), src.rowBytes(),
                         tightRB, src.height());
        }
        return true;
    }

    SkRasterPipeline::StockStage load;
    bool srcIsNormalized;
    bool srcIsSRGB;
    auto loadSwizzle = get_load_and_src_swizzle(src.colorType(),
                                                &load,
                                                &srcIsNormalized,
                                                &srcIsSRGB);

    SkRasterPipeline::StockStage store;
    LumMode lumMode;
    bool dstIsNormalized;
    bool dstIsSRGB;
    auto storeSwizzle = get_dst_swizzle_and_store(dst.colorType(),
                                                  &store,
                                                  &lumMode,
                                                  &dstIsNormalized,
                                                  &dstIsSRGB);

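    // When no per-pixel conversion (including gamut clamping) is needed, the load and store
    // swizzles are composed into a single swizzle that's applied once; otherwise the load swizzle,
    // the conversion stages, and the store swizzle are appended separately below.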
    bool clampGamut;
    SkTLazy<SkColorSpaceXformSteps> steps;
    GrSwizzle loadStoreSwizzle;
    if (alphaOrCSConversion) {
        steps.init(src.colorSpace(), src.alphaType(), dst.colorSpace(), dst.alphaType());
        clampGamut = dstIsNormalized && dst.alphaType() == kPremul_SkAlphaType;
    } else {
        clampGamut = dstIsNormalized && !srcIsNormalized && dst.alphaType() == kPremul_SkAlphaType;
        if (!clampGamut) {
            loadStoreSwizzle = GrSwizzle::Concat(loadSwizzle, storeSwizzle);
        }
    }
    int cnt = 1;
    int height = src.height();
    SkRasterPipeline_MemoryCtx
            srcCtx{const_cast<void*>(src.addr()), SkToInt(src.rowBytes()/srcBpp)},
            dstCtx{dst.addr(), SkToInt(dst.rowBytes()/dstBpp)};

    if (flipY) {
        // It *almost* works to point the src at the last row and negate the stride and run the
        // whole rectangle. However, SkRasterPipeline::run()'s control loop uses size_t loop
        // variables so it winds up relying on unsigned overflow math. It works out in practice
        // but UBSAN says "no!" as it's technically undefined and in theory a compiler could emit
        // code that didn't do what is intended. So we go one row at a time. :(
        srcCtx.pixels = static_cast<char*>(srcCtx.pixels) + src.rowBytes()*(height - 1);
        std::swap(cnt, height);
    }

    bool hasConversion = alphaOrCSConversion || clampGamut || lumMode != LumMode::kNone;

    if (srcIsSRGB && dstIsSRGB && !hasConversion) {
        // No need to convert from srgb if we are just going to immediately convert it back.
        srcIsSRGB = dstIsSRGB = false;
    }

    hasConversion = hasConversion || srcIsSRGB || dstIsSRGB;

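    // If flipping, 'cnt' and 'height' were swapped above, so the pipeline runs one row at a time:
    // srcCtx starts at the last source row and steps backwards while dstCtx steps forwards.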
    for (int i = 0; i < cnt; ++i) {
        SkRasterPipeline_<256> pipeline;
        pipeline.append(load, &srcCtx);
        if (hasConversion) {
            loadSwizzle.apply(&pipeline);
            if (srcIsSRGB) {
                pipeline.append_transfer_function(*skcms_sRGB_TransferFunction());
            }
            if (alphaOrCSConversion) {
                steps->apply(&pipeline);
            }
            if (clampGamut) {
                append_clamp_gamut(&pipeline);
            }
            switch (lumMode) {
                case LumMode::kNone:
                    break;
                case LumMode::kToRGB:
                    pipeline.append(SkRasterPipeline::StockStage::bt709_luminance_or_luma_to_rgb);
                    break;
                case LumMode::kToAlpha:
                    pipeline.append(SkRasterPipeline::StockStage::bt709_luminance_or_luma_to_alpha);
                    // If we ever need to store srgb-encoded gray (e.g. GL_SLUMINANCE8) then we
                    // should use ToRGB and then a swizzle stage rather than ToAlpha. The subsequent
                    // transfer function stage ignores the alpha channel (where we just stashed the
                    // gray).
                    SkASSERT(!dstIsSRGB);
                    break;
            }
            if (dstIsSRGB) {
                pipeline.append_transfer_function(*skcms_sRGB_Inverse_TransferFunction());
            }
            storeSwizzle.apply(&pipeline);
        } else {
            loadStoreSwizzle.apply(&pipeline);
        }
        pipeline.append(store, &dstCtx);
        pipeline.run(0, 0, src.width(), height);
        srcCtx.pixels = static_cast<char*>(srcCtx.pixels) - src.rowBytes();
        dstCtx.pixels = static_cast<char*>(dstCtx.pixels) + dst.rowBytes();
    }
    return true;
}

bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, std::array<float, 4> color) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    if (!dstInfo.isValid()) {
        return false;
    }
    if (!dst) {
        return false;
    }
    if (dstRB < dstInfo.minRowBytes()) {
        return false;
    }
    if (dstInfo.colorType() == GrColorType::kRGB_888) {
        // SkRasterPipeline doesn't handle writing to RGB_888. So we handle that specially here.
        uint32_t rgba = SkColor4f{color[0], color[1], color[2], color[3]}.toBytes_RGBA();
        for (int y = 0; y < dstInfo.height(); ++y) {
            char* d = static_cast<char*>(dst) + y * dstRB;
            for (int x = 0; x < dstInfo.width(); ++x, d += 3) {
                memcpy(d, &rgba, 3);
            }
        }
        return true;
    }

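    // A single pipeline run writes the constant color to every pixel: the color is the pipeline's
    // source, followed by any luminance/sRGB handling, the destination swizzle, and the store.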
    LumMode lumMode;
    bool isNormalized;
    bool dstIsSRGB;
    SkRasterPipeline::StockStage store;
    GrSwizzle storeSwizzle = get_dst_swizzle_and_store(dstInfo.colorType(), &store, &lumMode,
                                                       &isNormalized, &dstIsSRGB);
    char block[64];
    SkArenaAlloc alloc(block, sizeof(block), 1024);
    SkRasterPipeline_<256> pipeline;
    pipeline.append_constant_color(&alloc, color.data());
    switch (lumMode) {
        case LumMode::kNone:
            break;
        case LumMode::kToRGB:
            pipeline.append(SkRasterPipeline::StockStage::bt709_luminance_or_luma_to_rgb);
            break;
        case LumMode::kToAlpha:
            pipeline.append(SkRasterPipeline::StockStage::bt709_luminance_or_luma_to_alpha);
            // If we ever need to store srgb-encoded gray (e.g. GL_SLUMINANCE8) then we should use
            // ToRGB and then a swizzle stage rather than ToAlpha. The subsequent transfer function
            // stage ignores the alpha channel (where we just stashed the gray).
            SkASSERT(!dstIsSRGB);
            break;
    }
    if (dstIsSRGB) {
        pipeline.append_transfer_function(*skcms_sRGB_Inverse_TransferFunction());
    }
    storeSwizzle.apply(&pipeline);
    SkRasterPipeline_MemoryCtx dstCtx{dst, SkToInt(dstRB/dstInfo.bpp())};
    pipeline.append(store, &dstCtx);
    pipeline.run(0, 0, dstInfo.width(), dstInfo.height());

    return true;
}