/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Vector"

#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#include <cutils/log.h>
#include <safe_iop.h>

#include <utils/Errors.h>
#include <utils/VectorImpl.h>

#include "SharedBuffer.h"

/*****************************************************************************/


namespace android {

// ----------------------------------------------------------------------------

const size_t kMinVectorCapacity = 4;

static inline size_t max(size_t a, size_t b) {
    return a > b ? a : b;
}

// ----------------------------------------------------------------------------

VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
    : mStorage(0), mCount(0), mFlags(flags), mItemSize(itemSize)
{
}

VectorImpl::VectorImpl(const VectorImpl& rhs)
    : mStorage(rhs.mStorage), mCount(rhs.mCount),
      mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
{
    if (mStorage) {
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
}

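// NOTE: The destructor below cannot destroy the items itself: by the time it
// runs, the derived class (and its vtable entries for do_destroy()) is
// already gone. A typed wrapper is therefore expected to tear the vector down
// from its own destructor, roughly like this (hypothetical sketch; the real
// declaration lives in the corresponding header):
//
//     template<class TYPE>
//     Vector<TYPE>::~Vector() {
//         finish_vector();   // destroys the items and releases the storage
//     }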
VectorImpl::~VectorImpl()
{
    ALOGW_IF(mCount,
            "[%p] subclasses of VectorImpl must call finish_vector()"
            " in their destructor. Leaking %d bytes.",
            this, (int)(mCount*mItemSize));
    // We can't call _do_destroy() here because the vtable is already gone.
}

VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
        "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
    if (this != &rhs) {
        release_storage();
        if (rhs.mCount) {
            mStorage = rhs.mStorage;
            mCount = rhs.mCount;
            SharedBuffer::bufferFromData(mStorage)->acquire();
        } else {
            mStorage = 0;
            mCount = 0;
        }
    }
    return *this;
}

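// Returns storage that is safe to modify, implementing copy-on-write: if the
// backing SharedBuffer is shared with other vectors, a private copy is made
// first so that readers holding the old buffer are unaffected.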
void* VectorImpl::editArrayImpl()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        SharedBuffer* editable = sb->attemptEdit();
        if (editable == 0) {
            // If we're here, we're not the only owner of the buffer.
            // We must make a copy of it.
            editable = SharedBuffer::alloc(sb->size());
            // Fail instead of returning a pointer to storage that's not
            // editable. Otherwise we'd be editing the contents of a buffer
            // for which we're not the only owner, which is undefined behaviour.
            LOG_ALWAYS_FATAL_IF(editable == NULL);
            _do_copy(editable->data(), mStorage, mCount);
            release_storage();
            mStorage = editable->data();
        }
    }
    return mStorage;
}

size_t VectorImpl::capacity() const
{
    if (mStorage) {
        return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
    }
    return 0;
}

ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
{
    return insertArrayAt(vector.arrayImpl(), index, vector.size());
}

ssize_t VectorImpl::appendVector(const VectorImpl& vector)
{
    return insertVectorAt(vector, size());
}

ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, length);
    if (where) {
        _do_copy(where, array, length);
    }
    return where ? index : (ssize_t)NO_MEMORY;
}

ssize_t VectorImpl::appendArray(const void* array, size_t length)
{
    return insertArrayAt(array, size(), length);
}

ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
{
    return insertAt(0, index, numItems);
}

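// Inserts numItems entries at 'index'. When 'item' is non-null each new slot
// is filled with a copy of *item (via _do_splat); when it is null the new
// slots are default-constructed instead (via _do_construct).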
ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, numItems);
    if (where) {
        if (item) {
            _do_splat(where, item, numItems);
        } else {
            _do_construct(where, numItems);
        }
    }
    return where ? index : (ssize_t)NO_MEMORY;
}

static int sortProxy(const void* lhs, const void* rhs, void* func)
{
    return (*(VectorImpl::compar_t)func)(lhs, rhs);
}

status_t VectorImpl::sort(VectorImpl::compar_t cmp)
{
    return sort(sortProxy, (void*)cmp);
}

status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
{
    // The sort must be stable. We use insertion sort, which is well suited
    // to small and already (or mostly) sorted arrays; for large arrays a
    // mergesort would be a better fit.
    const ssize_t count = size();
    if (count > 1) {
        void* array = const_cast<void*>(arrayImpl());
        void* temp = 0;
        ssize_t i = 1;
        while (i < count) {
            void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
            void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
            if (cmp(curr, item, state) > 0) {

                if (!temp) {
                    // we're going to have to modify the array...
                    array = editArrayImpl();
                    if (!array) return NO_MEMORY;
                    temp = malloc(mItemSize);
                    if (!temp) return NO_MEMORY;
                    item = reinterpret_cast<char*>(array) + mItemSize*(i);
                    curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
                } else {
                    _do_destroy(temp, 1);
                }

                _do_copy(temp, item, 1);

                ssize_t j = i-1;
                void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
                do {
                    _do_destroy(next, 1);
                    _do_copy(next, curr, 1);
                    next = curr;
                    --j;
                    curr = NULL;
                    if (j >= 0) {
                        curr = reinterpret_cast<char*>(array) + mItemSize*(j);
                    }
                } while (j >= 0 && (cmp(curr, temp, state) > 0));

                _do_destroy(next, 1);
                _do_copy(next, temp, 1);
            }
            i++;
        }

        if (temp) {
            _do_destroy(temp, 1);
            free(temp);
        }
    }
    return NO_ERROR;
}

void VectorImpl::pop()
{
    if (size())
        removeItemsAt(size()-1, 1);
}

void VectorImpl::push()
{
    push(0);
}

void VectorImpl::push(const void* item)
{
    insertAt(item, size());
}

ssize_t VectorImpl::add()
{
    return add(0);
}

ssize_t VectorImpl::add(const void* item)
{
    return insertAt(item, size());
}

ssize_t VectorImpl::replaceAt(size_t index)
{
    return replaceAt(0, index);
}

ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
{
    ALOG_ASSERT(index < size(),
        "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());

    if (index >= size()) {
        return BAD_INDEX;
    }

    void* item = editItemLocation(index);
    if (item != prototype) {
        if (item == 0)
            return NO_MEMORY;
        _do_destroy(item, 1);
        if (prototype == 0) {
            _do_construct(item, 1);
        } else {
            _do_copy(item, prototype, 1);
        }
    }
    return ssize_t(index);
}

ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
{
    ALOG_ASSERT((index+count) <= size(),
        "[%p] remove: index=%d, count=%d, size=%d",
        this, (int)index, (int)count, (int)size());

    if ((index+count) > size())
        return BAD_VALUE;
    _shrink(index, count);
    return index;
}

void VectorImpl::finish_vector()
{
    release_storage();
    mStorage = 0;
    mCount = 0;
}

void VectorImpl::clear()
{
    _shrink(0, mCount);
}

void* VectorImpl::editItemLocation(size_t index)
{
    ALOG_ASSERT(index < capacity(),
        "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);

    if (index < capacity()) {
        void* buffer = editArrayImpl();
        if (buffer) {
            return reinterpret_cast<char*>(buffer) + index*mItemSize;
        }
    }
    return 0;
}

const void* VectorImpl::itemLocation(size_t index) const
{
    ALOG_ASSERT(index < capacity(),
        "[%p] itemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);

    if (index < capacity()) {
        const void* buffer = arrayImpl();
        if (buffer) {
            return reinterpret_cast<const char*>(buffer) + index*mItemSize;
        }
    }
    return 0;
}

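// Adjusts the capacity of the underlying storage. Requests at or below the
// current size are ignored (the existing capacity is returned); otherwise the
// storage is reallocated to hold exactly new_capacity items, which may grow
// or trim the unused headroom but never drops live items.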
ssize_t VectorImpl::setCapacity(size_t new_capacity)
{
    // The capacity must always be greater than or equal to the size
    // of this vector.
    if (new_capacity <= size()) {
        return capacity();
    }

    size_t new_allocation_size = 0;
    LOG_ALWAYS_FATAL_IF(!safe_mul(&new_allocation_size, new_capacity, mItemSize));
    SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
    if (sb) {
        void* array = sb->data();
        _do_copy(array, mStorage, size());
        release_storage();
        mStorage = const_cast<void*>(array);
    } else {
        return NO_MEMORY;
    }
    return new_capacity;
}

ssize_t VectorImpl::resize(size_t size) {
    ssize_t result = NO_ERROR;
    if (size > mCount) {
        result = insertAt(mCount, size - mCount);
    } else if (size < mCount) {
        result = removeItemsAt(size, mCount - size);
    }
    return result < 0 ? result : size;
}

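// Drops this vector's reference on the backing SharedBuffer. The eKeepStorage
// flag asks release() not to free the allocation itself, which is expected to
// let us destroy the items in place when we were the last owner and only then
// hand the raw buffer back with dealloc().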
void VectorImpl::release_storage()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        if (sb->release(SharedBuffer::eKeepStorage) == 1) {
            _do_destroy(mStorage, mCount);
            SharedBuffer::dealloc(sb);
        }
    }
}

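// Makes room for 'amount' new items at position 'where' and returns a pointer
// to the first new slot (or NULL on allocation failure). Capacity grows by
// roughly 1.5x, never below kMinVectorCapacity; when appending items with
// trivial copy and destruction the existing buffer is resized in place,
// otherwise a new buffer is allocated and the existing items are copied
// around the gap.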
void* VectorImpl::_grow(size_t where, size_t amount)
{
//    ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where <= mCount,
            "[%p] _grow: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(!safe_add(&new_size, mCount, amount), "new_size overflow");

    if (capacity() < new_size) {
        // NOTE: This implementation used to resize vectors as per ((3*x + 1) / 2)
        // (sigh..). Also note, the " + 1" was necessary to handle the special case
        // where x == 1, where the resized_capacity will be equal to the old
        // capacity without the +1. The old calculation wouldn't work properly
        // if x was zero.
        //
        // This approximates the old calculation, using (x + (x/2) + 1) instead.
        size_t new_capacity = 0;
        LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_size, (new_size / 2)),
                            "new_capacity overflow");
        LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_capacity, static_cast<size_t>(1u)),
                            "new_capacity overflow");
        new_capacity = max(kMinVectorCapacity, new_capacity);

        size_t new_alloc_size = 0;
        LOG_ALWAYS_FATAL_IF(!safe_mul(&new_alloc_size, new_capacity, mItemSize),
                            "new_alloc_size overflow");

//        ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
        if ((mStorage) &&
            (mCount == where) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_alloc_size);
            if (sb) {
                mStorage = sb->data();
            } else {
                return NULL;
            }
        } else {
            SharedBuffer* sb = SharedBuffer::alloc(new_alloc_size);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != mCount) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
                    _do_copy(dest, from, mCount-where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return NULL;
            }
        }
    } else {
        void* array = editArrayImpl();
        if (where != mCount) {
            const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
            void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_forward(to, from, mCount - where);
        }
    }
    mCount = new_size;
    void* free_space = const_cast<void*>(itemLocation(where));
    return free_space;
}

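// Destroys 'amount' items starting at 'where' and closes the gap. If the new
// size falls below half of the current capacity, the storage is reallocated
// to twice the new size (never below kMinVectorCapacity); otherwise the tail
// items are simply moved down in place.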
void VectorImpl::_shrink(size_t where, size_t amount)
{
    if (!mStorage)
        return;

//    ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where + amount <= mCount,
            "[%p] _shrink: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(!safe_sub(&new_size, mCount, amount));

    if (new_size < (capacity() / 2)) {
        // NOTE: (new_size * 2) is safe because capacity didn't overflow and
        // new_size < (capacity / 2).
        const size_t new_capacity = max(kMinVectorCapacity, new_size * 2);

        // NOTE: (new_capacity * mItemSize), (where * mItemSize) and
        // ((where + amount) * mItemSize) beyond this point are safe because
        // we are always reducing the capacity of the underlying SharedBuffer.
        // In other words, (old_capacity * mItemSize) did not overflow, and
        // where < (where + amount) < new_capacity < old_capacity.
        if ((where == new_size) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
            if (sb) {
                mStorage = sb->data();
            } else {
                return;
            }
        } else {
            SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != new_size) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
                    _do_copy(dest, from, new_size - where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return;
            }
        }
    } else {
        void* array = editArrayImpl();
        void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
        _do_destroy(to, amount);
        if (where != new_size) {
            const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_backward(to, from, new_size - where);
        }
    }
    mCount = new_size;
}

size_t VectorImpl::itemSize() const {
    return mItemSize;
}

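// The _do_* helpers below funnel item construction, destruction and copying
// through the typed virtuals (do_construct, do_destroy, do_copy, ...).
// Construction and destruction are skipped when the corresponding
// HAS_TRIVIAL_* flag is set, and copies degrade to a plain memcpy for
// trivially copyable items; the splat and move helpers always dispatch to the
// virtuals.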
void VectorImpl::_do_construct(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_CTOR)) {
        do_construct(storage, num);
    }
}

void VectorImpl::_do_destroy(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_DTOR)) {
        do_destroy(storage, num);
    }
}

void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_COPY)) {
        do_copy(dest, from, num);
    } else {
        memcpy(dest, from, num*itemSize());
    }
}

void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
    do_splat(dest, item, num);
}

void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
    do_move_forward(dest, from, num);
}

void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
    do_move_backward(dest, from, num);
}

/*****************************************************************************/

SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
    : VectorImpl(itemSize, flags)
{
}

SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
    : VectorImpl(rhs)
{
}

SortedVectorImpl::~SortedVectorImpl()
{
}

SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
{
    return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
}

ssize_t SortedVectorImpl::indexOf(const void* item) const
{
    return _indexOrderOf(item);
}

size_t SortedVectorImpl::orderOf(const void* item) const
{
    size_t o;
    _indexOrderOf(item, &o);
    return o;
}

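// Binary search over the sorted storage. Returns the index of an item that
// compares equal to 'item', or NAME_NOT_FOUND. When 'order' is non-null it
// receives the insertion position that keeps the vector sorted (the match's
// index when one is found).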
ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
{
    if (order) *order = 0;
    if (isEmpty()) {
        return NAME_NOT_FOUND;
    }
    // binary search
    ssize_t err = NAME_NOT_FOUND;
    ssize_t l = 0;
    ssize_t h = size()-1;
    ssize_t mid;
    const void* a = arrayImpl();
    const size_t s = itemSize();
    while (l <= h) {
        mid = l + (h - l)/2;
        const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
        const int c = do_compare(curr, item);
        if (c == 0) {
            err = l = mid;
            break;
        } else if (c < 0) {
            l = mid + 1;
        } else {
            h = mid - 1;
        }
    }
    if (order) *order = l;
    return err;
}

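// Inserts 'item' at its sorted position. If an equal item is already present
// it is replaced rather than duplicated; either way the resulting index is
// returned (or a negative error code).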
ssize_t SortedVectorImpl::add(const void* item)
{
    size_t order;
    ssize_t index = _indexOrderOf(item, &order);
    if (index < 0) {
        index = VectorImpl::insertAt(item, order, 1);
    } else {
        index = VectorImpl::replaceAt(item, index);
    }
    return index;
}

ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
{
    // naive merge...
    if (!vector.isEmpty()) {
        const void* buffer = vector.arrayImpl();
        const size_t is = itemSize();
        size_t s = vector.size();
        for (size_t i=0 ; i<s ; i++) {
            ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
            if (err < 0) {
                return err;
            }
        }
    }
    return NO_ERROR;
}

ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
{
    // we're merging a sorted vector... nice!
    ssize_t err = NO_ERROR;
    if (!vector.isEmpty()) {
        // first take care of the case where the vectors are sorted together
        if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
            err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
        } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
            err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
        } else {
            // this could be made a little better
            err = merge(static_cast<const VectorImpl&>(vector));
        }
    }
    return err;
}

ssize_t SortedVectorImpl::remove(const void* item)
{
    ssize_t i = indexOf(item);
    if (i >= 0) {
        VectorImpl::removeItemsAt(i, 1);
    }
    return i;
}

/*****************************************************************************/

} // namespace android