/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkRWBuffer.h"

#include "SkMakeUnique.h"
#include "SkMalloc.h"
#include "SkStream.h"
#include "SkTo.h"

#include <atomic>
#include <new>

// Force small chunks to be a page's worth
static const size_t kMinAllocSize = 4096;

struct SkBufferBlock {
    SkBufferBlock*  fNext;      // updated by the writer
    size_t          fUsed;      // updated by the writer
    const size_t    fCapacity;

    SkBufferBlock(size_t capacity) : fNext(nullptr), fUsed(0), fCapacity(capacity) {}

    // The payload is allocated immediately after this header (see Alloc), so it starts
    // just past the struct itself.
    const void* startData() const { return this + 1; }

    size_t avail() const { return fCapacity - fUsed; }
    void* availData() { return (char*)this->startData() + fUsed; }

    static SkBufferBlock* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        void* buffer = sk_malloc_throw(sizeof(SkBufferBlock) + capacity);
        return new (buffer) SkBufferBlock(capacity);
    }

    // Return the number of bytes actually appended. It is important that we always completely
    // fill this block before spilling into the next, since the reader uses fCapacity to know
    // how many bytes it can read.
    //
    size_t append(const void* src, size_t length) {
        this->validate();
        size_t amount = SkTMin(this->avail(), length);
        memcpy(this->availData(), src, amount);
        fUsed += amount;
        this->validate();
        return amount;
    }

    // Do not call this in the reader thread, since the writer may be updating fUsed.
    // (The assertion would still hold, but TSAN may still complain about the race.)
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fCapacity > 0);
        SkASSERT(fUsed <= fCapacity);
#endif
    }

private:
    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = kMinAllocSize - sizeof(SkBufferBlock);
        return SkTMax(length, minSize);
    }
};

struct SkBufferHead {
    mutable std::atomic<int32_t> fRefCnt;
    SkBufferBlock   fBlock;

    SkBufferHead(size_t capacity) : fRefCnt(1), fBlock(capacity) {}

    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = kMinAllocSize - sizeof(SkBufferHead);
        return SkTMax(length, minSize);
    }

    static SkBufferHead* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        size_t size = sizeof(SkBufferHead) + capacity;
        void* buffer = sk_malloc_throw(size);
        return new (buffer) SkBufferHead(capacity);
    }

    void ref() const {
        SkAssertResult(fRefCnt.fetch_add(+1, std::memory_order_relaxed));
    }

    void unref() const {
        // A release here acts in place of all releases we "should" have been doing in ref().
        int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
        SkASSERT(oldRefCnt);
        if (1 == oldRefCnt) {
            // Like unique(), the acquire is only needed on success.
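            // Grab the rest of the chain before freeing the head: fBlock (and its fNext
            // pointer) live inside this same allocation.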
            SkBufferBlock* block = fBlock.fNext;
            sk_free((void*)this);
            while (block) {
                SkBufferBlock* next = block->fNext;
                sk_free(block);
                block = next;
            }
        }
    }

    void validate(size_t minUsed, const SkBufferBlock* tail = nullptr) const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0);
        size_t totalUsed = 0;
        const SkBufferBlock* block = &fBlock;
        const SkBufferBlock* lastBlock = block;
        while (block) {
            block->validate();
            totalUsed += block->fUsed;
            lastBlock = block;
            block = block->fNext;
        }
        SkASSERT(minUsed <= totalUsed);
        if (tail) {
            SkASSERT(tail == lastBlock);
        }
#endif
    }
};

///////////////////////////////////////////////////////////////////////////////////////////////////
// The reader can only access block.fCapacity (which never changes), and cannot access
// block.fUsed, which may be updated by the writer.
//
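// A typical traversal of a snapshot (illustrative only; processBytes is a hypothetical callback,
// and 'snapshot' an sk_sp<SkROBuffer> such as one returned by SkRWBuffer::makeROBufferSnapshot):
//
//     SkROBuffer::Iter iter(snapshot);
//     do {
//         processBytes(iter.data(), iter.size());
//     } while (iter.next());
//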
SkROBuffer::SkROBuffer(const SkBufferHead* head, size_t available, const SkBufferBlock* tail)
    : fHead(head), fAvailable(available), fTail(tail)
{
    if (head) {
        fHead->ref();
        SkASSERT(available > 0);
        head->validate(available, tail);
    } else {
        SkASSERT(0 == available);
        SkASSERT(!tail);
    }
}

SkROBuffer::~SkROBuffer() {
    if (fHead) {
        fHead->unref();
    }
}

SkROBuffer::Iter::Iter(const SkROBuffer* buffer) {
    this->reset(buffer);
}

SkROBuffer::Iter::Iter(const sk_sp<SkROBuffer>& buffer) {
    this->reset(buffer.get());
}

void SkROBuffer::Iter::reset(const SkROBuffer* buffer) {
    fBuffer = buffer;
    if (buffer && buffer->fHead) {
        fBlock = &buffer->fHead->fBlock;
        fRemaining = buffer->fAvailable;
    } else {
        fBlock = nullptr;
        fRemaining = 0;
    }
}

const void* SkROBuffer::Iter::data() const {
    return fRemaining ? fBlock->startData() : nullptr;
}

size_t SkROBuffer::Iter::size() const {
    if (!fBlock) {
        return 0;
    }
    return SkTMin(fBlock->fCapacity, fRemaining);
}

bool SkROBuffer::Iter::next() {
    if (fRemaining) {
        fRemaining -= this->size();
        if (fBuffer->fTail == fBlock) {
            // There are more blocks, but fBuffer does not know about them.
            SkASSERT(0 == fRemaining);
            fBlock = nullptr;
        } else {
            fBlock = fBlock->fNext;
        }
    }
    return fRemaining != 0;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(nullptr), fTail(nullptr), fTotalUsed(0) {
    if (initialCapacity) {
        fHead = SkBufferHead::Alloc(initialCapacity);
        fTail = &fHead->fBlock;
    }
}

SkRWBuffer::~SkRWBuffer() {
    this->validate();
    if (fHead) {
        fHead->unref();
    }
}

// It is important that we always completely fill the current block before spilling over to the
// next, since our reader will be using fCapacity (min'd against its total available) to know how
// many bytes to read from a given block.
//
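// 'reserve' is extra capacity requested (beyond 'length') whenever a new block has to be
// allocated, letting callers that expect further appends cut down on future allocations.
//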
void SkRWBuffer::append(const void* src, size_t length, size_t reserve) {
    this->validate();
    if (0 == length) {
        return;
    }

    fTotalUsed += length;

    if (nullptr == fHead) {
        fHead = SkBufferHead::Alloc(length + reserve);
        fTail = &fHead->fBlock;
    }

    size_t written = fTail->append(src, length);
    SkASSERT(written <= length);
    src = (const char*)src + written;
    length -= written;

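    // Whatever did not fit in the tail spills into a brand-new block, sized to hold all of the
    // remainder (plus the caller's reserve), so a single append never creates more than one new
    // block.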
    if (length) {
        SkBufferBlock* block = SkBufferBlock::Alloc(length + reserve);
        fTail->fNext = block;
        fTail = block;
        written = fTail->append(src, length);
        SkASSERT(written == length);
    }
    this->validate();
}

#ifdef SK_DEBUG
void SkRWBuffer::validate() const {
    if (fHead) {
        fHead->validate(fTotalUsed, fTail);
    } else {
        SkASSERT(nullptr == fTail);
        SkASSERT(0 == fTotalUsed);
    }
}
#endif

///////////////////////////////////////////////////////////////////////////////////////////////////

class SkROBufferStreamAsset : public SkStreamAsset {
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fGlobalOffset <= fBuffer->size());
        SkASSERT(fLocalOffset <= fIter.size());
        SkASSERT(fLocalOffset <= fGlobalOffset);
#endif
    }

#ifdef SK_DEBUG
    class AutoValidate {
        SkROBufferStreamAsset* fStream;
    public:
        AutoValidate(SkROBufferStreamAsset* stream) : fStream(stream) { stream->validate(); }
        ~AutoValidate() { fStream->validate(); }
    };
    #define AUTO_VALIDATE   AutoValidate av(this);
#else
    #define AUTO_VALIDATE
#endif

public:
    SkROBufferStreamAsset(sk_sp<SkROBuffer> buffer) : fBuffer(std::move(buffer)), fIter(fBuffer) {
        fGlobalOffset = fLocalOffset = 0;
    }

    size_t getLength() const override { return fBuffer->size(); }

    bool rewind() override {
        AUTO_VALIDATE
        fIter.reset(fBuffer.get());
        fGlobalOffset = fLocalOffset = 0;
        return true;
    }

    size_t read(void* dst, size_t request) override {
        AUTO_VALIDATE
        size_t bytesRead = 0;
        // Copy out of the current block, advancing the iterator as each block is exhausted,
        // until 'request' bytes have been delivered or the data runs out.
        for (;;) {
            size_t size = fIter.size();
            SkASSERT(fLocalOffset <= size);
            size_t avail = SkTMin(size - fLocalOffset, request - bytesRead);
            if (dst) {
                memcpy(dst, (const char*)fIter.data() + fLocalOffset, avail);
                dst = (char*)dst + avail;
            }
            bytesRead += avail;
            fLocalOffset += avail;
            SkASSERT(bytesRead <= request);
            if (bytesRead == request) {
                break;
            }
            // If we get here, we've exhausted the current block.
            SkASSERT(fLocalOffset == size);
            fLocalOffset = 0;
            if (!fIter.next()) {
                break;  // ran out of data
            }
        }
        fGlobalOffset += bytesRead;
        SkASSERT(fGlobalOffset <= fBuffer->size());
        return bytesRead;
    }

    bool isAtEnd() const override {
        return fBuffer->size() == fGlobalOffset;
    }

    size_t getPosition() const override {
        return fGlobalOffset;
    }

    bool seek(size_t position) override {
        AUTO_VALIDATE
        if (position < fGlobalOffset) {
            this->rewind();
        }
        (void)this->skip(position - fGlobalOffset);
        return true;
    }

    bool move(long offset) override {
        AUTO_VALIDATE
        offset += fGlobalOffset;
        if (offset <= 0) {
            this->rewind();
        } else {
            (void)this->seek(SkToSizeT(offset));
        }
        return true;
    }

private:
    SkStreamAsset* onDuplicate() const override {
        return new SkROBufferStreamAsset(fBuffer);
    }

    SkStreamAsset* onFork() const override {
        auto clone = this->duplicate();
        clone->seek(this->getPosition());
        return clone.release();
    }

    sk_sp<SkROBuffer>   fBuffer;
    SkROBuffer::Iter    fIter;
    size_t              fLocalOffset;   // offset within the block fIter currently points at
    size_t              fGlobalOffset;  // offset within the entire buffer
};

std::unique_ptr<SkStreamAsset> SkRWBuffer::makeStreamSnapshot() const {
    return skstd::make_unique<SkROBufferStreamAsset>(this->makeROBufferSnapshot());
}
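
// A minimal usage sketch (illustrative only; 'bytes' and 'len' are hypothetical): a writer
// appends into the SkRWBuffer, and a snapshot stream taken afterwards reads back exactly the
// bytes that had been appended at snapshot time.
//
//     SkRWBuffer buffer(/*initialCapacity=*/0);
//     buffer.append(bytes, len, /*reserve=*/0);
//     std::unique_ptr<SkStreamAsset> stream = buffer.makeStreamSnapshot();
//     char scratch[64];
//     size_t n = stream->read(scratch, sizeof(scratch));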