// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef DAWNNATIVE_SUBRESOURCESTORAGE_H_
#define DAWNNATIVE_SUBRESOURCESTORAGE_H_

#include "common/Assert.h"
#include "common/TypeTraits.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Subresource.h"

#include <array>
#include <limits>
#include <memory>
#include <vector>

namespace dawn_native {

    // SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
    // value of type T except that it tries to compress similar subresources so that algorithms
    // can act on a whole range of subresources at once if they have the same state.
    //
    // For example a very common case to optimize for is the tracking of the usage of texture
    // subresources inside a render pass: the vast majority of texture views will select the whole
    // texture while a small minority will select a sub-range. We want to optimize the common case
    // by setting and checking a single "usage" value when a full subresource is used but at the
    // same time allow per-subresource data when needed.
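    //
    // A minimal sketch of that common case (`aspects`, `arrayLayerCount` and `mipLevelCount`
    // stand in for values queried from the texture; view->GetRange() and
    // wgpu::TextureUsage::Stuff mirror the placeholders used in the Update() example below):
    //
    //   SubresourceStorage<wgpu::TextureUsage> usages(aspects, arrayLayerCount, mipLevelCount);
    //   usages.Update(view->GetRange(), [](const SubresourceRange&, wgpu::TextureUsage* data) {
    //       *data |= wgpu::TextureUsage::Stuff;
    //   });
    //
    //   // As long as views cover the whole texture, `usages` stays compressed per aspect.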
    //
    // Another example is barrier tracking per-subresource in the backends: it will often happen
    // that during texture upload each mip level will have a different "barrier state". However
    // when the texture is fully uploaded and after it is used for sampling (with a full view) for
    // the first time, the barrier state will likely be the same across all the subresources.
    // That's why some form of "recompression" of subresource state must be possible.
    //
    // In order to keep the implementation details private and to avoid iterator-hell, this
    // container uses a more functional approach of calling a closure on the interesting ranges.
    // This is for example how to look at the state of all subresources.
    //
    //   subresources.Iterate([](const SubresourceRange& range, const T& data) {
    //      // Do something with the knowledge that all the subresources in `range` have value
    //      // `data`.
    //   });
    //
    // SubresourceStorage internally tracks compression state per aspect and then per layer of each
    // aspect. This means that a 2-aspect texture can have the following compression state:
    //
    //  - Aspect 0 is fully compressed.
    //  - Aspect 1 is partially compressed:
    //    - Aspect 1 layer 3 is decompressed.
    //    - Aspect 1 layers 0-2 and 4-42 are compressed.
    //
    // A useful model to reason about SubresourceStorage is to represent it as a tree:
    //
    //  - SubresourceStorage is the root.
    //    |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
    //       any children because the data is constant across all of the subtree.
    //      |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
    //         its node doesn't have any children because the data is constant across all of the
    //         subtree.
    //        |-> Nodes 3 deep represent individual mip levels (for uncompressed layers).
    //
    // The concept of recompression is the removal of all child nodes of a non-leaf node when the
    // data is constant across them. Decompression is the addition of child nodes to a leaf node
    // and copying of its data to all its children.
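    //
    // For example, a sketch with a single-aspect storage of ints over 4 layers and 4 levels:
    //
    //   SubresourceStorage<int> s(Aspect::Color, 4, 4, 0);  // Fully compressed.
    //   s.Update(SubresourceRange::MakeSingle(Aspect::Color, 1, 2),
    //            [](const SubresourceRange&, int* data) { *data = 1; });
    //   // The aspect and its layer 1 are now decompressed to hold the odd value out.
    //   s.Update(SubresourceRange::MakeFull(Aspect::Color, 4, 4),
    //            [](const SubresourceRange&, int* data) { *data = 0; });
    //   // Every subresource holds 0 again, so Update() recompresses back to a single value.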
    //
    // The choice of having secondary compression for array layers is to optimize for the cases
    // where render or transfer operations update specific layers of a texture while the rest is
    // untouched. It seems much less likely that there would be operations that touch all Nth mips
    // of a 2D array texture without touching the others.
    //
    // There are several hot code paths that create new SubresourceStorage like the tracking of
    // resource usage per-pass. We don't want to allocate a container for the decompressed data
    // unless we have to because it would dramatically lower performance. Instead
    // SubresourceStorage contains an inline array that contains the per-aspect compressed data
    // and only allocates per-subresource storage when an aspect is decompressed.
    //
    // T must be a copyable type that supports equality comparison with ==.
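    //
    // For example, enums and integers qualify, and so would a small struct along these lines
    // (a sketch, not a type used by Dawn):
    //
    //   struct BarrierState {
    //       wgpu::TextureUsage lastUsage;
    //       bool operator==(const BarrierState& other) const {
    //           return lastUsage == other.lastUsage;
    //       }
    //   };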
    //
    // The implementation of functions in this file can have a lot of control flow and corner cases
    // so each modification should come with extensive tests and ensure 100% code coverage of the
    // modified functions. See instructions at
    // https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md#local-coverage-script
    // to run the test with code coverage. A command line that worked in the past (with the right
    // GN args for the out/coverage directory in a Chromium checkout) is:
    //
    /*
       python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
           "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
           third_party/dawn/src/dawn_native
    */
    //
    // TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
    // if recompression can happen or not in Update() and Merge()
    template <typename T>
    class SubresourceStorage {
      public:
        static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
        static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");

        // Creates the storage with the given "dimensions" and all subresources starting with the
        // initial value.
        SubresourceStorage(Aspect aspects,
                           uint32_t arrayLayerCount,
                           uint32_t mipLevelCount,
                           T initialValue = {});

        // Returns the data for a single subresource. Note that the reference returned might be the
        // same for multiple subresources.
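        // For example (a sketch): while an aspect is fully compressed,
        // &Get(aspect, 0, 0) == &Get(aspect, 1, 3) may hold, so the returned reference must not
        // be treated as storage owned by a single subresource.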
        const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;

        // Given an iterateFunc that's a function or function-like object that can be called with
        // arguments of type (const SubresourceRange& range, const T& data) and returns void,
        // calls it with aggregate ranges if possible, such that each subresource is part of
        // exactly one of the ranges iterateFunc is called with (and obviously data is the value
        // stored for that subresource). For example:
        //
        //   subresources.Iterate([&](const SubresourceRange& range, const T& data) {
        //       // Do something with range and data.
        //   });
        template <typename F>
        void Iterate(F&& iterateFunc) const;

        // Given an updateFunc that's a function or function-like object that can be called with
        // arguments of type (const SubresourceRange& range, T* data) and returns void,
        // calls it with ranges that in aggregate form `range`, and passes for each of the
        // sub-ranges a pointer to modify the value for that sub-range. For example:
        //
        //   subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
        //       *data |= wgpu::TextureUsage::Stuff;
        //   });
        //
        // /!\ WARNING: updateFunc should never use range to compute the update to data, otherwise
        // your code is likely to break when compression happens. Range should only be used for
        // side effects like using it to compute a Vulkan pipeline barrier.
        template <typename F>
        void Update(const SubresourceRange& range, F&& updateFunc);

        // Given a mergeFunc that's a function or a function-like object that can be called with
        // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
        // returns void, calls it with ranges that in aggregate form the full resource, and passes
        // for each of the sub-ranges a pointer to modify the value for that sub-range and the
        // corresponding value from `other` for that sub-range. For example:
        //
        //   subresources.Merge(otherUsages,
        //       [](const SubresourceRange&, T* data, const T& otherData) {
        //          *data |= otherData;
        //       });
        //
        // /!\ WARNING: mergeFunc should never use range to compute the update to data, otherwise
        // your code is likely to break when compression happens. Range should only be used for
        // side effects like using it to compute a Vulkan pipeline barrier.
        template <typename U, typename F>
        void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);

        // Other operations to consider:
        //
        //  - UpdateTo(Range, T) that updates the range to a constant value.
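        //
        // Until then, updating a range to a constant value can be emulated with Update()
        // (a sketch, where `value` is whatever constant the caller wants to store):
        //
        //   subresources.Update(range, [&](const SubresourceRange&, T* data) { *data = value; });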

        // Methods to query the internal state of SubresourceStorage for testing.
        Aspect GetAspectsForTesting() const;
        uint32_t GetArrayLayerCountForTesting() const;
        uint32_t GetMipLevelCountForTesting() const;
        bool IsAspectCompressedForTesting(Aspect aspect) const;
        bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;

      private:
        template <typename U>
        friend class SubresourceStorage;

        void DecompressAspect(uint32_t aspectIndex);
        void RecompressAspect(uint32_t aspectIndex);

        void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
        void RecompressLayer(uint32_t aspectIndex, uint32_t layer);

        SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;

        // LayerCompressed should never be called when the aspect is compressed, otherwise it would
        // need to check that mLayerCompressed is not null before indexing it.
        bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
        bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;

        // Returns references to the data for a compressed plane / layer or subresource.
        // Each variant should only be called at the matching compression level.
        T& DataInline(uint32_t aspectIndex);
        T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
        const T& DataInline(uint32_t aspectIndex) const;
        const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;

        Aspect mAspects;
        uint8_t mMipLevelCount;
        uint16_t mArrayLayerCount;

        // Invariant: if an aspect is marked compressed, then all its layers are marked as
        // compressed.
        static constexpr size_t kMaxAspects = 2;
        std::array<bool, kMaxAspects> mAspectCompressed;
        std::array<T, kMaxAspects> mInlineAspectData;

        // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
        std::unique_ptr<bool[]> mLayerCompressed;

        // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
        // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
        // the data for a compressed layer of an aspect is in the slot for (aspect, layer, 0).
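        // For example, with 6 array layers and 4 mip levels, (aspectIndex 1, layer 2, level 3)
        // is stored at mData[(1 * 6 + 2) * 4 + 3], i.e. mData[35].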
        std::unique_ptr<T[]> mData;
    };

    template <typename T>
    SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
                                              uint32_t arrayLayerCount,
                                              uint32_t mipLevelCount,
                                              T initialValue)
        : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
        ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
        ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());

        uint32_t aspectCount = GetAspectCount(aspects);
        ASSERT(aspectCount <= kMaxAspects);

        for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
            mAspectCompressed[aspectIndex] = true;
            DataInline(aspectIndex) = initialValue;
        }
    }

    template <typename T>
    template <typename F>
    void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
        bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
        bool fullAspects =
            range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;

        for (Aspect aspect : IterateEnumMask(range.aspects)) {
            uint32_t aspectIndex = GetAspectIndex(aspect);

            // Call the updateFunc once for the whole aspect if possible or decompress and fall
            // back to per-layer handling.
            if (mAspectCompressed[aspectIndex]) {
                if (fullAspects) {
                    SubresourceRange updateRange =
                        SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
                    updateFunc(updateRange, &DataInline(aspectIndex));
                    continue;
                }
                DecompressAspect(aspectIndex);
            }

            uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
            for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
                // Call the updateFunc once for the whole layer if possible or decompress and
                // fall back to per-level handling.
                if (LayerCompressed(aspectIndex, layer)) {
                    if (fullLayers) {
                        SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
                        updateFunc(updateRange, &Data(aspectIndex, layer));
                        continue;
                    }
                    DecompressLayer(aspectIndex, layer);
                }

                // Worst case: call updateFunc per level.
                uint32_t levelEnd = range.baseMipLevel + range.levelCount;
                for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
                    SubresourceRange updateRange =
                        SubresourceRange::MakeSingle(aspect, layer, level);
                    updateFunc(updateRange, &Data(aspectIndex, layer, level));
                }

                // If the range has fullLayers then it is likely we can recompress after the calls
                // to updateFunc (this branch is skipped if updateFunc was called for the whole
                // layer).
                if (fullLayers) {
                    RecompressLayer(aspectIndex, layer);
                }
            }

            // If the range has fullAspects then it is likely we can recompress after the calls to
            // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
            if (fullAspects) {
                RecompressAspect(aspectIndex);
            }
        }
    }

    template <typename T>
    template <typename U, typename F>
    void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
        ASSERT(mAspects == other.mAspects);
        ASSERT(mArrayLayerCount == other.mArrayLayerCount);
        ASSERT(mMipLevelCount == other.mMipLevelCount);

        for (Aspect aspect : IterateEnumMask(mAspects)) {
            uint32_t aspectIndex = GetAspectIndex(aspect);

            // If the other storage's aspect is compressed we don't need to decompress anything
            // in `this` and can just iterate through it, merging with `other`'s constant value for
            // the aspect. For code simplicity this can be done with a call to Update().
            if (other.mAspectCompressed[aspectIndex]) {
                const U& otherData = other.DataInline(aspectIndex);
                Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
                       [&](const SubresourceRange& subrange, T* data) {
                           mergeFunc(subrange, data, otherData);
                       });
                continue;
            }

            // Other doesn't have the aspect compressed so we must do at least per-layer merging.
            if (mAspectCompressed[aspectIndex]) {
                DecompressAspect(aspectIndex);
            }

            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
                // Similarly to above, use a fast path if other's layer is compressed.
                if (other.LayerCompressed(aspectIndex, layer)) {
                    const U& otherData = other.Data(aspectIndex, layer);
                    Update(GetFullLayerRange(aspect, layer),
                           [&](const SubresourceRange& subrange, T* data) {
                               mergeFunc(subrange, data, otherData);
                           });
                    continue;
                }

                // Sad case, other is decompressed for this layer, do per-level merging.
                if (LayerCompressed(aspectIndex, layer)) {
                    DecompressLayer(aspectIndex, layer);
                }

                for (uint32_t level = 0; level < mMipLevelCount; level++) {
                    SubresourceRange updateRange =
                        SubresourceRange::MakeSingle(aspect, layer, level);
                    mergeFunc(updateRange, &Data(aspectIndex, layer, level),
                              other.Data(aspectIndex, layer, level));
                }

                RecompressLayer(aspectIndex, layer);
            }

            RecompressAspect(aspectIndex);
        }
    }

    template <typename T>
    template <typename F>
    void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
        for (Aspect aspect : IterateEnumMask(mAspects)) {
            uint32_t aspectIndex = GetAspectIndex(aspect);

            // Fastest path, call iterateFunc on the whole aspect at once.
            if (mAspectCompressed[aspectIndex]) {
                SubresourceRange range =
                    SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
                iterateFunc(range, DataInline(aspectIndex));
                continue;
            }

            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
                // Fast path, call iterateFunc on the whole array layer at once.
                if (LayerCompressed(aspectIndex, layer)) {
                    SubresourceRange range = GetFullLayerRange(aspect, layer);
                    iterateFunc(range, Data(aspectIndex, layer));
                    continue;
                }

                // Slow path, call iterateFunc for each mip level.
                for (uint32_t level = 0; level < mMipLevelCount; level++) {
                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
                    iterateFunc(range, Data(aspectIndex, layer, level));
                }
            }
        }
    }

    template <typename T>
    const T& SubresourceStorage<T>::Get(Aspect aspect,
                                        uint32_t arrayLayer,
                                        uint32_t mipLevel) const {
        uint32_t aspectIndex = GetAspectIndex(aspect);
        ASSERT(aspectIndex < GetAspectCount(mAspects));
        ASSERT(arrayLayer < mArrayLayerCount);
        ASSERT(mipLevel < mMipLevelCount);

        // Fastest path, the aspect is compressed!
        if (mAspectCompressed[aspectIndex]) {
            return DataInline(aspectIndex);
        }

        // Fast path, the array layer is compressed.
        if (LayerCompressed(aspectIndex, arrayLayer)) {
            return Data(aspectIndex, arrayLayer);
        }

        return Data(aspectIndex, arrayLayer, mipLevel);
    }

    template <typename T>
    Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
        return mAspects;
    }

    template <typename T>
    uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
        return mArrayLayerCount;
    }

    template <typename T>
    uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
        return mMipLevelCount;
    }

    template <typename T>
    bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
        return mAspectCompressed[GetAspectIndex(aspect)];
    }

    template <typename T>
    bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
        return mAspectCompressed[GetAspectIndex(aspect)] ||
               mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
    }

    template <typename T>
    void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
        ASSERT(mAspectCompressed[aspectIndex]);
        const T& aspectData = DataInline(aspectIndex);
        mAspectCompressed[aspectIndex] = false;

        // Extra allocations are only needed when aspects are decompressed. Create them lazily.
        if (mData == nullptr) {
            ASSERT(mLayerCompressed == nullptr);

            uint32_t aspectCount = GetAspectCount(mAspects);
            mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
            mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);

            for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount;
                 layerIndex++) {
                mLayerCompressed[layerIndex] = true;
            }
        }

        ASSERT(LayerCompressed(aspectIndex, 0));
        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
            Data(aspectIndex, layer) = aspectData;
            ASSERT(LayerCompressed(aspectIndex, layer));
        }
    }

    template <typename T>
    void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
        ASSERT(!mAspectCompressed[aspectIndex]);
        // All layers of the aspect must be compressed for the aspect to possibly recompress.
        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
            if (!LayerCompressed(aspectIndex, layer)) {
                return;
            }
        }

        T layer0Data = Data(aspectIndex, 0);
        for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
            if (!(Data(aspectIndex, layer) == layer0Data)) {
                return;
            }
        }

        mAspectCompressed[aspectIndex] = true;
        DataInline(aspectIndex) = layer0Data;
    }

    template <typename T>
    void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
        ASSERT(LayerCompressed(aspectIndex, layer));
        ASSERT(!mAspectCompressed[aspectIndex]);
        const T& layerData = Data(aspectIndex, layer);
        LayerCompressed(aspectIndex, layer) = false;

        // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
        // allows starting the iteration at level 1.
        for (uint32_t level = 1; level < mMipLevelCount; level++) {
            Data(aspectIndex, layer, level) = layerData;
        }
    }

    template <typename T>
    void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
        ASSERT(!LayerCompressed(aspectIndex, layer));
        ASSERT(!mAspectCompressed[aspectIndex]);
        const T& level0Data = Data(aspectIndex, layer, 0);

        for (uint32_t level = 1; level < mMipLevelCount; level++) {
            if (!(Data(aspectIndex, layer, level) == level0Data)) {
                return;
            }
        }

        LayerCompressed(aspectIndex, layer) = true;
    }

    template <typename T>
    SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
        return {aspect, {layer, 1}, {0, mMipLevelCount}};
    }

    template <typename T>
    bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
        ASSERT(!mAspectCompressed[aspectIndex]);
        return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
    }

    template <typename T>
    bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
        ASSERT(!mAspectCompressed[aspectIndex]);
        return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
    }

    template <typename T>
    T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
        ASSERT(mAspectCompressed[aspectIndex]);
        return mInlineAspectData[aspectIndex];
    }
    template <typename T>
    T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
        ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
        ASSERT(!mAspectCompressed[aspectIndex]);
        return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
    }
    template <typename T>
    const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
        ASSERT(mAspectCompressed[aspectIndex]);
        return mInlineAspectData[aspectIndex];
    }
    template <typename T>
    const T& SubresourceStorage<T>::Data(uint32_t aspectIndex,
                                         uint32_t layer,
                                         uint32_t level) const {
        ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
        ASSERT(!mAspectCompressed[aspectIndex]);
        return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
    }

}  // namespace dawn_native

#endif  // DAWNNATIVE_SUBRESOURCESTORAGE_H_