/*
 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "platform/SharedBuffer.h"

#include "wtf/unicode/Unicode.h"
#include "wtf/unicode/UTF8.h"

#undef SHARED_BUFFER_STATS

#ifdef SHARED_BUFFER_STATS
#include "wtf/DataLog.h"
#include "wtf/MainThread.h"
#endif

namespace blink {

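// Data that does not fit in the initial contiguous buffer is stored in
// fixed-size 4 KiB segments. A position within the buffer maps to a segment
// index and an offset inside that segment via the helpers below.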
static const unsigned segmentSize = 0x1000;
static const unsigned segmentPositionMask = 0x0FFF;

static inline unsigned segmentIndex(unsigned position)
{
    return position / segmentSize;
}

static inline unsigned offsetInSegment(unsigned position)
{
    return position & segmentPositionMask;
}

static inline char* allocateSegment()
{
    return static_cast<char*>(fastMalloc(segmentSize));
}

static inline void freeSegment(char* p)
{
    fastFree(p);
}

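// Optional debugging instrumentation: when SHARED_BUFFER_STATS is defined
// above, every live SharedBuffer is tracked and the largest ones are
// periodically logged on the main thread.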
#ifdef SHARED_BUFFER_STATS

static Mutex& statsMutex()
{
    DEFINE_STATIC_LOCAL(Mutex, mutex, ());
    return mutex;
}

static HashSet<SharedBuffer*>& liveBuffers()
{
    DEFINE_STATIC_LOCAL(HashSet<SharedBuffer*>, buffers, ());
    return buffers;
}

static bool sizeComparator(SharedBuffer* a, SharedBuffer* b)
{
    return a->size() > b->size();
}

static CString snippetForBuffer(SharedBuffer* sharedBuffer)
{
    const unsigned kMaxSnippetLength = 64;
    char* snippet = 0;
    unsigned snippetLength = std::min(sharedBuffer->size(), kMaxSnippetLength);
    CString result = CString::newUninitialized(snippetLength, snippet);

    const char* segment;
    unsigned offset = 0;
    while (unsigned segmentLength = sharedBuffer->getSomeData(segment, offset)) {
        unsigned length = std::min(segmentLength, snippetLength - offset);
        memcpy(snippet + offset, segment, length);
        offset += segmentLength;
        if (offset >= snippetLength)
            break;
    }

    for (unsigned i = 0; i < snippetLength; ++i) {
        if (!isASCIIPrintable(snippet[i]))
            snippet[i] = '?';
    }

    return result;
}

static void printStats(void*)
{
    MutexLocker locker(statsMutex());
    Vector<SharedBuffer*> buffers;
    for (HashSet<SharedBuffer*>::const_iterator iter = liveBuffers().begin(); iter != liveBuffers().end(); ++iter)
        buffers.append(*iter);
    std::sort(buffers.begin(), buffers.end(), sizeComparator);

    dataLogF("---- Shared Buffer Stats ----\n");
    for (size_t i = 0; i < buffers.size() && i < 64; ++i) {
        CString snippet = snippetForBuffer(buffers[i]);
        dataLogF("Buffer size=%8u %s\n", buffers[i]->size(), snippet.data());
    }
}

static void didCreateSharedBuffer(SharedBuffer* buffer)
{
    MutexLocker locker(statsMutex());
    liveBuffers().add(buffer);

    callOnMainThread(printStats, 0);
}

static void willDestroySharedBuffer(SharedBuffer* buffer)
{
    MutexLocker locker(statsMutex());
    liveBuffers().remove(buffer);
}

#endif

SharedBuffer::SharedBuffer()
    : m_size(0)
    , m_buffer(PurgeableVector::NotPurgeable)
{
#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
#endif
}

SharedBuffer::SharedBuffer(size_t size)
    : m_size(size)
    , m_buffer(PurgeableVector::NotPurgeable)
{
    m_buffer.reserveCapacity(size);
    m_buffer.grow(size);
#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
#endif
}

SharedBuffer::SharedBuffer(const char* data, int size)
    : m_size(0)
    , m_buffer(PurgeableVector::NotPurgeable)
{
    // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
    if (size < 0)
        CRASH();

    append(data, size);

#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
#endif
}

SharedBuffer::SharedBuffer(const char* data, int size, PurgeableVector::PurgeableOption purgeable)
    : m_size(0)
    , m_buffer(purgeable)
{
    // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
    if (size < 0)
        CRASH();

    append(data, size);

#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
#endif
}

SharedBuffer::SharedBuffer(const unsigned char* data, int size)
    : m_size(0)
    , m_buffer(PurgeableVector::NotPurgeable)
{
    // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
    if (size < 0)
        CRASH();

    append(reinterpret_cast<const char*>(data), size);

#ifdef SHARED_BUFFER_STATS
    didCreateSharedBuffer(this);
#endif
}

SharedBuffer::~SharedBuffer()
{
    clear();

#ifdef SHARED_BUFFER_STATS
    willDestroySharedBuffer(this);
#endif
}

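// Builds a SharedBuffer from an existing Vector<char> by adopting the
// vector's contents into the internal PurgeableVector.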
PassRefPtr<SharedBuffer> SharedBuffer::adoptVector(Vector<char>& vector)
{
    RefPtr<SharedBuffer> buffer = create();
    buffer->m_buffer.adopt(vector);
    buffer->m_size = buffer->m_buffer.size();
    return buffer.release();
}

unsigned SharedBuffer::size() const
{
    return m_size;
}

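// Returns the data as a single contiguous block, merging any outstanding
// segments into m_buffer first.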
const char* SharedBuffer::data() const
{
    mergeSegmentsIntoBuffer();
    return m_buffer.data();
}

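// Appends the contents of another SharedBuffer by walking its data with
// getSomeData().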
void SharedBuffer::append(PassRefPtr<SharedBuffer> data)
{
    const char* segment;
    size_t position = 0;
    while (size_t length = data->getSomeData(segment, position)) {
        append(segment, length);
        position += length;
    }
}

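// Appends raw bytes. While the total size stays within one segment (4 KiB)
// the data is kept in the contiguous m_buffer; once it grows beyond that,
// additional data is copied into fixed-size segments allocated on demand.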
void SharedBuffer::append(const char* data, unsigned length)
{
    ASSERT(isLocked());
    if (!length)
        return;

    ASSERT(m_size >= m_buffer.size());
    unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size());
    m_size += length;

    if (m_size <= segmentSize) {
        // No need to use segments for small resource data.
        m_buffer.append(data, length);
        return;
    }

    char* segment;
    if (!positionInSegment) {
        segment = allocateSegment();
        m_segments.append(segment);
    } else
        segment = m_segments.last() + positionInSegment;

    unsigned segmentFreeSpace = segmentSize - positionInSegment;
    unsigned bytesToCopy = std::min(length, segmentFreeSpace);

    for (;;) {
        memcpy(segment, data, bytesToCopy);
        if (static_cast<unsigned>(length) == bytesToCopy)
            break;

        length -= bytesToCopy;
        data += bytesToCopy;
        segment = allocateSegment();
        m_segments.append(segment);
        bytesToCopy = std::min(length, segmentSize);
    }
}

void SharedBuffer::append(const Vector<char>& data)
{
    append(data.data(), data.size());
}

void SharedBuffer::clear()
{
    for (unsigned i = 0; i < m_segments.size(); ++i)
        freeSegment(m_segments[i]);

    m_segments.clear();
    m_size = 0;
    m_buffer.clear();
}

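// Returns a deep copy with all data flattened into the clone's contiguous
// buffer; the original's segments are left untouched.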
PassRefPtr<SharedBuffer> SharedBuffer::copy() const
{
    RefPtr<SharedBuffer> clone(adoptRef(new SharedBuffer));
    clone->m_size = m_size;
    clone->m_buffer.reserveCapacity(m_size);
    clone->m_buffer.append(m_buffer.data(), m_buffer.size());
    if (!m_segments.isEmpty()) {
        const char* segment = 0;
        unsigned position = m_buffer.size();
        while (unsigned segmentSize = getSomeData(segment, position)) {
            clone->m_buffer.append(segment, segmentSize);
            position += segmentSize;
        }
        ASSERT(position == clone->size());
    }
    return clone.release();
}

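// Copies every segment's bytes into the contiguous m_buffer and releases the
// segments, after which data() can hand out a single flat pointer.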
void SharedBuffer::mergeSegmentsIntoBuffer() const
{
    unsigned bufferSize = m_buffer.size();
    if (m_size > bufferSize) {
        m_buffer.reserveCapacity(m_size);
        unsigned bytesLeft = m_size - bufferSize;
        for (unsigned i = 0; i < m_segments.size(); ++i) {
            unsigned bytesToCopy = std::min(bytesLeft, segmentSize);
            m_buffer.append(m_segments[i], bytesToCopy);
            bytesLeft -= bytesToCopy;
            freeSegment(m_segments[i]);
        }
        m_segments.clear();
    }
}

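// Yields a pointer to the longest contiguous run of data starting at
// |position| and returns its length; returns 0 once |position| reaches the
// end of the buffer. Typical iteration pattern (sketch; consumeBytes() is a
// placeholder for caller code):
//
//   const char* segment;
//   unsigned position = 0;
//   while (unsigned length = buffer->getSomeData(segment, position)) {
//       consumeBytes(segment, length);
//       position += length;
//   }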
unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const
{
    ASSERT(isLocked());
    unsigned totalSize = size();
    if (position >= totalSize) {
        someData = 0;
        return 0;
    }

    ASSERT_WITH_SECURITY_IMPLICATION(position < m_size);
    unsigned consecutiveSize = m_buffer.size();
    if (position < consecutiveSize) {
        someData = m_buffer.data() + position;
        return consecutiveSize - position;
    }

    position -= consecutiveSize;
    unsigned segments = m_segments.size();
    unsigned maxSegmentedSize = segments * segmentSize;
    unsigned segment = segmentIndex(position);
    if (segment < segments) {
        unsigned bytesLeft = totalSize - consecutiveSize;
        unsigned segmentedSize = std::min(maxSegmentedSize, bytesLeft);

        unsigned positionInSegment = offsetInSegment(position);
        someData = m_segments[segment] + positionInSegment;
        return segment == segments - 1 ? segmentedSize - position : segmentSize - positionInSegment;
    }
    ASSERT_NOT_REACHED();
    return 0;
}

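// Copies the entire contents into a newly allocated ArrayBuffer; returns
// nullptr if the allocation fails or the copy comes up short.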
PassRefPtr<ArrayBuffer> SharedBuffer::getAsArrayBuffer() const
{
    RefPtr<ArrayBuffer> arrayBuffer = ArrayBuffer::createUninitialized(static_cast<unsigned>(size()), 1);

    if (!arrayBuffer)
        return nullptr;

    const char* segment = 0;
    unsigned position = 0;
    while (unsigned segmentSize = getSomeData(segment, position)) {
        memcpy(static_cast<char*>(arrayBuffer->data()) + position, segment, segmentSize);
        position += segmentSize;
    }

    if (position != arrayBuffer->byteLength()) {
        ASSERT_NOT_REACHED();
        // Don't return the incomplete ArrayBuffer.
        return nullptr;
    }

    return arrayBuffer;
}

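// Copies the entire contents into a newly allocated SkData for use by Skia.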
PassRefPtr<SkData> SharedBuffer::getAsSkData() const
{
    unsigned bufferLength = size();
    SkData* data = SkData::NewUninitialized(bufferLength);
    char* buffer = static_cast<char*>(data->writable_data());
    const char* segment = 0;
    unsigned position = 0;
    while (unsigned segmentSize = getSomeData(segment, position)) {
        memcpy(buffer + position, segment, segmentSize);
        position += segmentSize;
    }

    if (position != bufferLength) {
        ASSERT_NOT_REACHED();
        // Don't return the incomplete SkData.
        return nullptr;
    }
    return adoptRef(data);
}

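// lock(), unlock() and isLocked() delegate to the underlying PurgeableVector;
// while unlocked, a purgeable buffer's memory may be discarded by the system.
// unlock() merges any segments into the contiguous buffer before unlocking it.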
bool SharedBuffer::lock()
{
    return m_buffer.lock();
}

void SharedBuffer::unlock()
{
    mergeSegmentsIntoBuffer();
    m_buffer.unlock();
}

bool SharedBuffer::isLocked() const
{
    return m_buffer.isLocked();
}

} // namespace blink