/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/UniformManager.h"

#include "include/core/SkM44.h"
#include "include/core/SkMatrix.h"
#include "include/private/base/SkAlign.h"
#include "include/private/base/SkTemplates.h"
#include "src/base/SkHalf.h"
#include "src/gpu/graphite/DrawTypes.h"
#include "src/gpu/graphite/PipelineData.h"
#include "src/gpu/graphite/Uniform.h"

#include <algorithm>
#include <cstring>

// ensure that these types are the sizes the uniform data is expecting
static_assert(sizeof(int32_t) == 4);
static_assert(sizeof(float) == 4);
static_assert(sizeof(int16_t) == 2);
static_assert(sizeof(SkHalf) == 2);

namespace skgpu::graphite {

//////////////////////////////////////////////////////////////////////////////
template<typename BaseType>
static constexpr size_t tight_vec_size(int vecLength) {
    return sizeof(BaseType) * vecLength;
}

/**
 * From Section 7.6.2.2 "Standard Uniform Block Layout":
 *  1. If the member is a scalar consuming N basic machine units, the base alignment is N.
 *  2. If the member is a two- or four-component vector with components consuming N basic machine
 *     units, the base alignment is 2N or 4N, respectively.
 *  3. If the member is a three-component vector with components consuming N
 *     basic machine units, the base alignment is 4N.
 *  4. If the member is an array of scalars or vectors, the base alignment and array
 *     stride are set to match the base alignment of a single array element, according
 *     to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
 *     array may have padding at the end; the base offset of the member following
 *     the array is rounded up to the next multiple of the base alignment.
 *  5. If the member is a column-major matrix with C columns and R rows, the
 *     matrix is stored identically to an array of C column vectors with R components each,
 *     according to rule (4).
 *  6. If the member is an array of S column-major matrices with C columns and
 *     R rows, the matrix is stored identically to a row of S × C column vectors
 *     with R components each, according to rule (4).
 *  7. If the member is a row-major matrix with C columns and R rows, the matrix
 *     is stored identically to an array of R row vectors with C components each,
 *     according to rule (4).
 *  8. If the member is an array of S row-major matrices with C columns and R
 *     rows, the matrix is stored identically to a row of S × R row vectors with C
 *     components each, according to rule (4).
 *  9. If the member is a structure, the base alignment of the structure is N, where
 *     N is the largest base alignment value of any of its members, and rounded
 *     up to the base alignment of a vec4. The individual members of this substructure are then
 *     assigned offsets by applying this set of rules recursively,
 *     where the base offset of the first member of the sub-structure is equal to the
 *     aligned offset of the structure. The structure may have padding at the end;
 *     the base offset of the member following the sub-structure is rounded up to
 *     the next multiple of the base alignment of the structure.
 * 10. If the member is an array of S structures, the S elements of the array are laid
 *     out in order, according to rule (9).
 */
template<typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
struct Rules140 {
    /**
     * For an array of scalars or vectors this returns the stride between array elements. For
     * matrices or arrays of matrices this returns the stride between columns of the matrix. Note
     * that for single (non-array) scalars or vectors we don't require a stride.
     */
    static constexpr size_t Stride(int count) {
        SkASSERT(count >= 1 || count == Uniform::kNonArray);
        static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
        static_assert(Cols >= 1 && Cols <= 4);
        if (Cols != 1) {
            // This is a matrix or array of matrices. We return the stride between columns.
            SkASSERT(RowsOrVecLength > 1);
            return Rules140<BaseType, RowsOrVecLength>::Stride(Uniform::kNonArray);
        }

        // Get alignment of a single non-array vector of BaseType by Rule 1, 2, or 3.
        int n = RowsOrVecLength == 3 ? 4 : RowsOrVecLength;
        if (count == Uniform::kNonArray) {
            return n * sizeof(BaseType);
        }

        // Rule 4.

        // Alignment of vec4 by Rule 2.
        constexpr size_t kVec4Alignment = tight_vec_size<float>(4);
        size_t kElementAlignment = tight_vec_size<BaseType>(n);
        // Round kElementAlignment up to a multiple of kVec4Alignment.
        size_t m = (kElementAlignment + kVec4Alignment - 1) / kVec4Alignment;
        return m * kVec4Alignment;
    }
};
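
// A few illustrative sanity checks of the std140 rules above (examples only, not exhaustive):
static_assert(Rules140<float>::Stride(4) == 16);    // float[4]: stride rounds up to a vec4
static_assert(Rules140<float, 2>::Stride(Uniform::kNonArray) == 8);  // lone float2: tight
static_assert(Rules140<float, 3>::Stride(Uniform::kNonArray) == 16); // lone float3: padded to vec4
static_assert(Rules140<float, 4, 4>::Stride(Uniform::kNonArray) == 16); // float4x4: column stride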

/**
 * When using the std430 storage layout, shader storage blocks will be laid out in buffer storage
 * identically to uniform and shader storage blocks using the std140 layout, except that the base
 * alignment and stride of arrays of scalars and vectors in rule 4 and of structures in rule 9 are
 * not rounded up to a multiple of the base alignment of a vec4.
 */
template<typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
struct Rules430 {
    static constexpr size_t Stride(int count) {
        SkASSERT(count >= 1 || count == Uniform::kNonArray);
        static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
        static_assert(Cols >= 1 && Cols <= 4);

        if (Cols != 1) {
            // This is a matrix or array of matrices. We return the stride between columns.
            SkASSERT(RowsOrVecLength > 1);
            return Rules430<BaseType, RowsOrVecLength>::Stride(Uniform::kNonArray);
        }

        // Get alignment of a single non-array vector of BaseType by Rule 1, 2, or 3.
        int n = RowsOrVecLength == 3 ? 4 : RowsOrVecLength;
        if (count == Uniform::kNonArray) {
            return n * sizeof(BaseType);
        }

        // Rule 4 without the round up to a multiple of align-of vec4.
        return tight_vec_size<BaseType>(n);
    }
};
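
// Illustrative checks of the std430 rules (examples only): array strides stay tight.
static_assert(Rules430<float>::Stride(4) == 4);     // float[4]: no vec4 round-up
static_assert(Rules430<float, 3>::Stride(4) == 16); // float3[4]: a vec3 still pads out to a vec4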

// The strides used here were derived from the rules we've imposed on ourselves in
// GrMtlPipelineStateDataManager. Everything is tight except 3-component types, which have the
// stride of their 4-component equivalents.
template<typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
struct RulesMetal {
    static constexpr size_t Stride(int count) {
        SkASSERT(count >= 1 || count == Uniform::kNonArray);
        static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
        static_assert(Cols >= 1 && Cols <= 4);

        if (Cols != 1) {
            // This is a matrix or array of matrices. We return the stride between columns.
            SkASSERT(RowsOrVecLength > 1);
            return RulesMetal<BaseType, RowsOrVecLength>::Stride(Uniform::kNonArray);
        }

        // Get alignment of a single non-array vector of BaseType by Rule 1, 2, or 3.
        int n = RowsOrVecLength == 3 ? 4 : RowsOrVecLength;
        if (count == Uniform::kNonArray) {
            return n * sizeof(BaseType);
        }

        return tight_vec_size<BaseType>(n);
    }
};
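
// Illustrative checks of the Metal rules (examples only): tight except for 3-component types.
static_assert(RulesMetal<float>::Stride(4) == 4);
static_assert(RulesMetal<float, 3>::Stride(4) == 16);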

template<template<typename BaseType, int RowsOrVecLength, int Cols> class Rules>
class Writer {
private:
    template <typename MemType, typename UniformType>
    static void CopyUniforms(void* dst, const void* src, int numUniforms) {
        if constexpr (std::is_same<MemType, UniformType>::value) {
            // Matching types--use memcpy.
            std::memcpy(dst, src, numUniforms * sizeof(MemType));
            return;
        }

        if constexpr (std::is_same<MemType, float>::value &&
                      std::is_same<UniformType, SkHalf>::value) {
            // Convert floats to half.
            const float* floatBits = static_cast<const float*>(src);
            SkHalf* halfBits = static_cast<SkHalf*>(dst);
            while (numUniforms-- > 0) {
                *halfBits++ = SkFloatToHalf(*floatBits++);
            }
            return;
        }

        if constexpr (std::is_same<MemType, int32_t>::value &&
                      std::is_same<UniformType, int16_t>::value) {
            // Convert ints to short.
            const int32_t* intBits = static_cast<const int32_t*>(src);
            int16_t* shortBits = static_cast<int16_t*>(dst);
            while (numUniforms-- > 0) {
                *shortBits++ = int16_t(*intBits++);
            }
            return;
        }

        SK_ABORT("implement conversion from MemType to UniformType");
    }

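    // Writes `n` array elements (or a single value when n == Uniform::kNonArray) of MemType data
    // from `src` into `dst`, converting each element to UniformType and spacing elements by the
    // stride the Rules dictate. Returns the byte size of the write; `dst` may be null to compute
    // the size alone. For example, under the std140 rules above each float3 element occupies a
    // 16-byte slot, so writing two of them returns 32.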
    template <typename MemType, typename UniformType, int RowsOrVecLength = 1, int Cols = 1>
    static uint32_t Write(void* dst, int n, const MemType src[]) {
        size_t stride = Rules<UniformType, RowsOrVecLength, Cols>::Stride(n);
        n = (n == Uniform::kNonArray) ? 1 : n;
        n *= Cols;

        // A null value for `dst` means that this method was called to calculate the size of the
        // write without actually copying data.
        if (dst) {
            if (stride == RowsOrVecLength * sizeof(UniformType)) {
                CopyUniforms<MemType, UniformType>(dst, src, n * RowsOrVecLength);
            } else {
                for (int i = 0; i < n; ++i) {
                    CopyUniforms<MemType, UniformType>(dst, src, RowsOrVecLength);
                    src += RowsOrVecLength;
                    dst = SkTAddOffset<void>(dst, stride);
                }
            }
        }

        return n * stride;
    }

    template <typename UniformType>
    static uint32_t WriteSkMatrices(void* dst, int n, const SkMatrix m[]) {
        // Stride() will give us the stride of each column, so multiply by 3 to get the matrix
        // stride.
        size_t stride = 3 * Rules<UniformType, 3, 3>::Stride(1);
        n = std::max(n, 1);

        // A null value for `dst` means that this method was called to calculate the size of the
        // write without actually copying data.
        if (dst) {
            size_t offset = 0;
            for (int i = 0; i < n; ++i) {
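                // SkMatrix stores its elements in row-major order; reorder them into the
                // column-major layout that the uniform rules above expect.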
                float mt[] = {
                        m[i].get(SkMatrix::kMScaleX),
                        m[i].get(SkMatrix::kMSkewY),
                        m[i].get(SkMatrix::kMPersp0),
                        m[i].get(SkMatrix::kMSkewX),
                        m[i].get(SkMatrix::kMScaleY),
                        m[i].get(SkMatrix::kMPersp1),
                        m[i].get(SkMatrix::kMTransX),
                        m[i].get(SkMatrix::kMTransY),
                        m[i].get(SkMatrix::kMPersp2),
                };
                Write<float, UniformType, 3, 3>(SkTAddOffset<void>(dst, offset), 1, mt);
                offset += stride;
            }
        }
        return n * stride;
    }

public:
    // If `dest` is a nullptr, then this method returns the size of the write without writing any
    // data.
    static uint32_t WriteUniform(SkSLType type,
                                 CType ctype,
                                 void* dest,
                                 int n,
                                 const void* src) {
        SkASSERT(n >= 1 || n == Uniform::kNonArray);
        switch (type) {
            case SkSLType::kShort:
                return Write<int32_t, int16_t>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kShort2:
                return Write<int32_t, int16_t, 2>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kShort3:
                return Write<int32_t, int16_t, 3>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kShort4:
                return Write<int32_t, int16_t, 4>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kInt:
                return Write<int32_t, int32_t>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kInt2:
                return Write<int32_t, int32_t, 2>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kInt3:
                return Write<int32_t, int32_t, 3>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kInt4:
                return Write<int32_t, int32_t, 4>(dest, n, static_cast<const int32_t*>(src));

            case SkSLType::kHalf:
                return Write<float, SkHalf>(dest, n, static_cast<const float*>(src));

            case SkSLType::kFloat:
                return Write<float, float>(dest, n, static_cast<const float*>(src));

            case SkSLType::kHalf2:
                return Write<float, SkHalf, 2>(dest, n, static_cast<const float*>(src));

            case SkSLType::kFloat2:
                return Write<float, float, 2>(dest, n, static_cast<const float*>(src));

            case SkSLType::kHalf3:
                return Write<float, SkHalf, 3>(dest, n, static_cast<const float*>(src));

            case SkSLType::kFloat3:
                return Write<float, float, 3>(dest, n, static_cast<const float*>(src));

            case SkSLType::kHalf4:
                return Write<float, SkHalf, 4>(dest, n, static_cast<const float*>(src));

            case SkSLType::kFloat4:
                return Write<float, float, 4>(dest, n, static_cast<const float*>(src));

            case SkSLType::kHalf2x2:
                return Write<float, SkHalf, 2, 2>(dest, n, static_cast<const float*>(src));

            case SkSLType::kFloat2x2:
                return Write<float, float, 2, 2>(dest, n, static_cast<const float*>(src));

            case SkSLType::kHalf3x3:
                switch (ctype) {
                    case CType::kDefault:
                        return Write<float, SkHalf, 3, 3>(dest, n, static_cast<const float*>(src));
                    case CType::kSkMatrix:
                        return WriteSkMatrices<SkHalf>(dest, n, static_cast<const SkMatrix*>(src));
                }
                SkUNREACHABLE;

            case SkSLType::kFloat3x3:
                switch (ctype) {
                    case CType::kDefault:
                        return Write<float, float, 3, 3>(dest, n, static_cast<const float*>(src));
                    case CType::kSkMatrix:
                        return WriteSkMatrices<float>(dest, n, static_cast<const SkMatrix*>(src));
                }
                SkUNREACHABLE;

            case SkSLType::kHalf4x4:
                return Write<float, SkHalf, 4, 4>(dest, n, static_cast<const float*>(src));

            case SkSLType::kFloat4x4:
                return Write<float, float, 4, 4>(dest, n, static_cast<const float*>(src));

            default:
                SK_ABORT("Unexpected uniform type");
        }
    }
};

// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
// alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we
// are. This works since all alignments are powers of 2. The mask is always (alignment - 1).
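// For example, SkSLType::kFloat2 has mask 0x7 (8-byte alignment): an offset of 20 gives
// (20 & 0x7) == 4, i.e. four bytes past the previous 8-byte boundary.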
static uint32_t sksltype_to_alignment_mask(SkSLType type) {
    switch (type) {
        case SkSLType::kInt:
        case SkSLType::kUInt:
        case SkSLType::kFloat:
            return 0x3;
        case SkSLType::kInt2:
        case SkSLType::kUInt2:
        case SkSLType::kFloat2:
            return 0x7;
        case SkSLType::kInt3:
        case SkSLType::kUInt3:
        case SkSLType::kFloat3:
        case SkSLType::kInt4:
        case SkSLType::kUInt4:
        case SkSLType::kFloat4:
            return 0xF;

        case SkSLType::kFloat2x2:
            return 0x7;
        case SkSLType::kFloat3x3:
            return 0xF;
        case SkSLType::kFloat4x4:
            return 0xF;

        case SkSLType::kShort:
        case SkSLType::kUShort:
        case SkSLType::kHalf:
            return 0x1;
        case SkSLType::kShort2:
        case SkSLType::kUShort2:
        case SkSLType::kHalf2:
            return 0x3;
        case SkSLType::kShort3:
        case SkSLType::kShort4:
        case SkSLType::kUShort3:
        case SkSLType::kUShort4:
        case SkSLType::kHalf3:
        case SkSLType::kHalf4:
            return 0x7;

        case SkSLType::kHalf2x2:
            return 0x3;
        case SkSLType::kHalf3x3:
            return 0x7;
        case SkSLType::kHalf4x4:
            return 0x7;

        // This query is only valid for certain types.
        case SkSLType::kVoid:
        case SkSLType::kBool:
        case SkSLType::kBool2:
        case SkSLType::kBool3:
        case SkSLType::kBool4:
        case SkSLType::kTexture2DSampler:
        case SkSLType::kTextureExternalSampler:
        case SkSLType::kTexture2DRectSampler:
        case SkSLType::kSampler:
        case SkSLType::kTexture2D:
        case SkSLType::kInput:
            break;
    }
    SK_ABORT("Unexpected type");
}

// Given the current offset into the ubo, calculate the offset for the uniform we're trying to add,
// taking into consideration all alignment requirements. Returns the aligned start offset for the
// new uniform.
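// For example, appending a float4 (mask 0xF) at currentOffset 20 yields (20 + 15) & ~15 == 32.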
static uint32_t get_ubo_aligned_offset(Layout layout,
                                       uint32_t currentOffset,
                                       SkSLType type,
                                       bool isArray) {
    uint32_t alignmentMask;
    if (layout == Layout::kStd140 && isArray) {
        // std140 array element alignment always equals the base alignment of a vec4.
        alignmentMask = sksltype_to_alignment_mask(SkSLType::kFloat4);
    } else {
        alignmentMask = sksltype_to_alignment_mask(type);
    }
    return (currentOffset + alignmentMask) & ~alignmentMask;
}

SkSLType UniformOffsetCalculator::getUniformTypeForLayout(SkSLType type) {
    if (fLayout != Layout::kMetal) {
        // GL/Vk expect uniforms in 32-bit precision. Convert lower-precision types to 32-bit.
        switch (type) {
            case SkSLType::kShort:   return SkSLType::kInt;
            case SkSLType::kUShort:  return SkSLType::kUInt;
            case SkSLType::kHalf:    return SkSLType::kFloat;

            case SkSLType::kShort2:  return SkSLType::kInt2;
            case SkSLType::kUShort2: return SkSLType::kUInt2;
            case SkSLType::kHalf2:   return SkSLType::kFloat2;

            case SkSLType::kShort3:  return SkSLType::kInt3;
            case SkSLType::kUShort3: return SkSLType::kUInt3;
            case SkSLType::kHalf3:   return SkSLType::kFloat3;

            case SkSLType::kShort4:  return SkSLType::kInt4;
            case SkSLType::kUShort4: return SkSLType::kUInt4;
            case SkSLType::kHalf4:   return SkSLType::kFloat4;

            case SkSLType::kHalf2x2: return SkSLType::kFloat2x2;
            case SkSLType::kHalf3x3: return SkSLType::kFloat3x3;
            case SkSLType::kHalf4x4: return SkSLType::kFloat4x4;

            default: break;
        }
    }

    return type;
}

void UniformOffsetCalculator::setLayout(Layout layout) {
    fLayout = layout;
    switch (layout) {
        case Layout::kStd140:
            fWriteUniform = Writer<Rules140>::WriteUniform;
            break;
        case Layout::kStd430:
            fWriteUniform = Writer<Rules430>::WriteUniform;
            break;
        case Layout::kMetal:
            fWriteUniform = Writer<RulesMetal>::WriteUniform;
            break;
        case Layout::kInvalid:
            SK_ABORT("Invalid layout type");
            break;
    }
}

UniformOffsetCalculator::UniformOffsetCalculator(Layout layout, uint32_t startingOffset)
        : fLayout(layout), fOffset(startingOffset) {
    this->setLayout(fLayout);
}

size_t UniformOffsetCalculator::advanceOffset(SkSLType type, unsigned int count) {
    SkSLType revisedType = this->getUniformTypeForLayout(type);

    // Insert padding as needed to get the correct uniform alignment.
    uint32_t alignedOffset = get_ubo_aligned_offset(fLayout,
                                                    fOffset,
                                                    revisedType,
                                                    /*isArray=*/count != Uniform::kNonArray);
    SkASSERT(alignedOffset >= fOffset);

    // Append the uniform size to our offset, then return the uniform start position.
    uint32_t uniformSize = fWriteUniform(revisedType, CType::kDefault,
                                         /*dest=*/nullptr, count, /*src=*/nullptr);
    fOffset = alignedOffset + uniformSize;
    return alignedOffset;
}

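// Example walkthrough of advanceOffset() above (std140): with fOffset == 4, a non-array float3
// pads the start to offset 16, reserves 16 bytes (a float3 is padded out to a float4), returns 16,
// and leaves fOffset at 32.
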
UniformDataBlock UniformManager::finishUniformDataBlock() {
    size_t size = SkAlignTo(fStorage.size(), fReqAlignment);
    size_t paddingSize = size - fStorage.size();
    char* padding = fStorage.append(paddingSize);
    memset(padding, 0, paddingSize);
    return UniformDataBlock(SkSpan(fStorage.begin(), size));
}

void UniformManager::resetWithNewLayout(Layout layout) {
    if (layout != fLayout) {
        this->setLayout(layout);
    }
    this->reset();
}

void UniformManager::reset() {
    fOffset = 0;
    fReqAlignment = 0;
    fStorage.clear();
}

void UniformManager::checkReset() const {
    SkASSERT(fOffset == 0);
    SkASSERT(fStorage.empty());
}

void UniformManager::setExpectedUniforms(SkSpan<const Uniform> expectedUniforms) {
    SkDEBUGCODE(fExpectedUniforms = expectedUniforms;)
    SkDEBUGCODE(fExpectedUniformIndex = 0;)
}

void UniformManager::checkExpected(SkSLType type, unsigned int count) {
    SkASSERT(fExpectedUniforms.size());
    SkASSERT(fExpectedUniformIndex >= 0 && fExpectedUniformIndex < (int)fExpectedUniforms.size());

    SkASSERT(fExpectedUniforms[fExpectedUniformIndex].type() == type);
    SkASSERT((fExpectedUniforms[fExpectedUniformIndex].count() == 0 && count == 1) ||
             fExpectedUniforms[fExpectedUniformIndex].count() == count);
    SkDEBUGCODE(fExpectedUniformIndex++;)
}

void UniformManager::doneWithExpectedUniforms() {
    SkASSERT(fExpectedUniformIndex == static_cast<int>(fExpectedUniforms.size()));
    SkDEBUGCODE(fExpectedUniforms = {};)
}

void UniformManager::writeInternal(SkSLType type, unsigned int count, const void* src) {
    SkSLType revisedType = this->getUniformTypeForLayout(type);

    const uint32_t startOffset = fOffset;
    const uint32_t alignedStartOffset = this->advanceOffset(revisedType, count);
    SkASSERT(fOffset > alignedStartOffset); // `fOffset` now points just past the new uniform
    const uint32_t bytesNeeded = fOffset - alignedStartOffset;

    // Insert padding if needed.
    if (alignedStartOffset > startOffset) {
        fStorage.append(alignedStartOffset - startOffset);
    }
    char* dst = fStorage.append(bytesNeeded);
    [[maybe_unused]] uint32_t bytesWritten =
            fWriteUniform(revisedType, CType::kDefault, dst, count, src);
    SkASSERT(bytesNeeded == bytesWritten);

    fReqAlignment = std::max(fReqAlignment, sksltype_to_alignment_mask(revisedType) + 1);
}

void UniformManager::write(SkSLType type, const void* src) {
    this->checkExpected(type, 1);
    this->writeInternal(type, Uniform::kNonArray, src);
}

void UniformManager::writeArray(SkSLType type, const void* src, unsigned int count) {
    // Don't write any elements if count is 0. Since Uniform::kNonArray == 0, passing count
    // directly would cause a one-element non-array write.
    if (count > 0) {
        this->checkExpected(type, count);
        this->writeInternal(type, count, src);
    }
}

void UniformManager::write(const Uniform& u, const uint8_t* src) {
    this->checkExpected(u.type(), (u.count() == Uniform::kNonArray) ? 1 : u.count());
    this->writeInternal(u.type(), u.count(), src);
}

void UniformManager::write(const SkM44& mat) {
    static constexpr SkSLType kType = SkSLType::kFloat4x4;
    this->write(kType, &mat);
}

void UniformManager::write(const SkPMColor4f& color) {
    static constexpr SkSLType kType = SkSLType::kFloat4;
    this->write(kType, &color);
}

void UniformManager::write(const SkRect& rect) {
    static constexpr SkSLType kType = SkSLType::kFloat4;
    this->write(kType, &rect);
}

void UniformManager::write(const SkPoint& point) {
    static constexpr SkSLType kType = SkSLType::kFloat2;
    this->write(kType, &point);
}

void UniformManager::write(float f) {
    static constexpr SkSLType kType = SkSLType::kFloat;
    this->write(kType, &f);
}

void UniformManager::write(int i) {
    static constexpr SkSLType kType = SkSLType::kInt;
    this->write(kType, &i);
}

void UniformManager::write(const SkV2& v) {
    static constexpr SkSLType kType = SkSLType::kFloat2;
    this->write(kType, &v);
}

void UniformManager::write(const SkV4& v) {
    static constexpr SkSLType kType = SkSLType::kFloat4;
    this->write(kType, &v);
}

void UniformManager::writeArray(SkSpan<const SkColor4f> arr) {
    static constexpr SkSLType kType = SkSLType::kFloat4;
    this->writeArray(kType, arr.data(), arr.size());
}

void UniformManager::writeArray(SkSpan<const SkPMColor4f> arr) {
    static constexpr SkSLType kType = SkSLType::kFloat4;
    this->writeArray(kType, arr.data(), arr.size());
}

void UniformManager::writeArray(SkSpan<const float> arr) {
    static constexpr SkSLType kType = SkSLType::kFloat;
    this->writeArray(kType, arr.data(), arr.size());
}

void UniformManager::writeHalf(const SkMatrix& mat) {
    static constexpr SkSLType kType = SkSLType::kHalf3x3;
    this->write(kType, &mat);
}

void UniformManager::writeHalfArray(SkSpan<const float> arr) {
    static constexpr SkSLType kType = SkSLType::kHalf;
    this->writeArray(kType, arr.data(), arr.size());
}

} // namespace skgpu::graphite