1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Vector"
18
19 #include <utils/VectorImpl.h>
20
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <string.h>
24
25 #include <log/log.h>
26
27 #include "SharedBuffer.h"
28
29 /*****************************************************************************/
30
31
32 namespace android {
33
34 // ----------------------------------------------------------------------------
35
36 const size_t kMinVectorCapacity = 4;
37
// Local helper returning the larger of two size_t values (avoids pulling
// in <algorithm> for std::max).
static inline size_t max(size_t a, size_t b) {
    return (b > a) ? b : a;
}
41
42 // ----------------------------------------------------------------------------
43
VectorImpl(size_t itemSize,uint32_t flags)44 VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
45 : mStorage(nullptr), mCount(0), mFlags(flags), mItemSize(itemSize)
46 {
47 }
48
// Copy constructor: shares rhs's SharedBuffer (bumping its refcount)
// instead of duplicating the items — actual copies happen lazily on edit.
VectorImpl::VectorImpl(const VectorImpl& rhs)
    : mStorage(rhs.mStorage),
      mCount(rhs.mCount),
      mFlags(rhs.mFlags),
      mItemSize(rhs.mItemSize) {
    if (mStorage != nullptr) {
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
}
57
~VectorImpl()58 VectorImpl::~VectorImpl()
59 {
60 ALOGW_IF(mCount,
61 "[%p] subclasses of VectorImpl must call finish_vector()"
62 " in their destructor. Leaking %d bytes.",
63 this, (int)(mCount*mItemSize));
64 // We can't call _do_destroy() here because the vtable is already gone.
65 }
66
// Assignment shares rhs's refcounted storage rather than copying items.
// Both vectors must store the same item size; mismatches abort.
VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
            "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
    if (this == &rhs) {
        return *this;
    }
    release_storage();
    if (rhs.mCount == 0) {
        // An empty rhs: do not share its (possibly allocated) buffer.
        mStorage = nullptr;
        mCount = 0;
    } else {
        mStorage = rhs.mStorage;
        mCount = rhs.mCount;
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
    return *this;
}
84
editArrayImpl()85 void* VectorImpl::editArrayImpl()
86 {
87 if (mStorage) {
88 const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
89 SharedBuffer* editable = sb->attemptEdit();
90 if (editable == nullptr) {
91 // If we're here, we're not the only owner of the buffer.
92 // We must make a copy of it.
93 editable = SharedBuffer::alloc(sb->size());
94 // Fail instead of returning a pointer to storage that's not
95 // editable. Otherwise we'd be editing the contents of a buffer
96 // for which we're not the only owner, which is undefined behaviour.
97 LOG_ALWAYS_FATAL_IF(editable == nullptr);
98 _do_copy(editable->data(), mStorage, mCount);
99 release_storage();
100 mStorage = editable->data();
101 }
102 }
103 return mStorage;
104 }
105
capacity() const106 size_t VectorImpl::capacity() const
107 {
108 if (mStorage) {
109 return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
110 }
111 return 0;
112 }
113
insertVectorAt(const VectorImpl & vector,size_t index)114 ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
115 {
116 return insertArrayAt(vector.arrayImpl(), index, vector.size());
117 }
118
appendVector(const VectorImpl & vector)119 ssize_t VectorImpl::appendVector(const VectorImpl& vector)
120 {
121 return insertVectorAt(vector, size());
122 }
123
insertArrayAt(const void * array,size_t index,size_t length)124 ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
125 {
126 if (index > size())
127 return BAD_INDEX;
128 void* where = _grow(index, length);
129 if (where) {
130 _do_copy(where, array, length);
131 }
132 return where ? index : (ssize_t)NO_MEMORY;
133 }
134
appendArray(const void * array,size_t length)135 ssize_t VectorImpl::appendArray(const void* array, size_t length)
136 {
137 return insertArrayAt(array, size(), length);
138 }
139
insertAt(size_t index,size_t numItems)140 ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
141 {
142 return insertAt(nullptr, index, numItems);
143 }
144
insertAt(const void * item,size_t index,size_t numItems)145 ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
146 {
147 if (index > size())
148 return BAD_INDEX;
149 void* where = _grow(index, numItems);
150 if (where) {
151 if (item) {
152 _do_splat(where, item, numItems);
153 } else {
154 _do_construct(where, numItems);
155 }
156 }
157 return where ? index : (ssize_t)NO_MEMORY;
158 }
159
sortProxy(const void * lhs,const void * rhs,void * func)160 static int sortProxy(const void* lhs, const void* rhs, void* func)
161 {
162 return (*(VectorImpl::compar_t)func)(lhs, rhs);
163 }
164
sort(VectorImpl::compar_t cmp)165 status_t VectorImpl::sort(VectorImpl::compar_t cmp)
166 {
167 return sort(sortProxy, (void*)cmp);
168 }
169
// Sorts the vector in place with a stable insertion sort, using the
// caller-supplied comparator and opaque 'state'. Insertion sort suits
// small and already-mostly-sorted arrays; for big arrays a mergesort
// could be better. Returns OK, or NO_MEMORY if copy-on-write or the
// scratch allocation fails.
status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
{
    // the sort must be stable. we're using insertion sort which
    // is well suited for small and already sorted arrays
    // for big arrays, it could be better to use mergesort
    const ssize_t count = size();
    if (count > 1) {
        void* array = const_cast<void*>(arrayImpl());
        void* temp = nullptr;  // one-item scratch slot, allocated lazily
        ssize_t i = 1;
        while (i < count) {
            void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
            void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
            if (cmp(curr, item, state) > 0) {

                if (!temp) {
                    // First out-of-order pair: we're going to have to
                    // modify the array, so switch to editable (COW)
                    // storage and allocate the scratch slot.
                    array = editArrayImpl();
                    if (!array) return NO_MEMORY;
                    temp = malloc(mItemSize);
                    if (!temp) return NO_MEMORY;
                    // Recompute: editArrayImpl() may have relocated storage.
                    item = reinterpret_cast<char*>(array) + mItemSize*(i);
                    curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
                } else {
                    // Destroy the previous occupant of the scratch slot
                    // before reusing it.
                    _do_destroy(temp, 1);
                }

                _do_copy(temp, item, 1);

                // Shift preceding larger items one slot right until the
                // insertion point for 'temp' is found.
                ssize_t j = i-1;
                void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
                do {
                    _do_destroy(next, 1);
                    _do_copy(next, curr, 1);
                    next = curr;
                    --j;
                    curr = nullptr;
                    if (j >= 0) {
                        curr = reinterpret_cast<char*>(array) + mItemSize*(j);
                    }
                } while (j>=0 && (cmp(curr, temp, state) > 0));

                // Drop the saved item into the hole.
                _do_destroy(next, 1);
                _do_copy(next, temp, 1);
            }
            i++;
        }

        if (temp) {
            _do_destroy(temp, 1);
            free(temp);
        }
    }
    return OK;
}
225
pop()226 void VectorImpl::pop()
227 {
228 if (size())
229 removeItemsAt(size()-1, 1);
230 }
231
push()232 void VectorImpl::push()
233 {
234 push(nullptr);
235 }
236
push(const void * item)237 void VectorImpl::push(const void* item)
238 {
239 insertAt(item, size());
240 }
241
add()242 ssize_t VectorImpl::add()
243 {
244 return add(nullptr);
245 }
246
add(const void * item)247 ssize_t VectorImpl::add(const void* item)
248 {
249 return insertAt(item, size());
250 }
251
replaceAt(size_t index)252 ssize_t VectorImpl::replaceAt(size_t index)
253 {
254 return replaceAt(nullptr, index);
255 }
256
replaceAt(const void * prototype,size_t index)257 ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
258 {
259 ALOG_ASSERT(index<size(),
260 "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());
261
262 if (index >= size()) {
263 return BAD_INDEX;
264 }
265
266 void* item = editItemLocation(index);
267 if (item != prototype) {
268 if (item == nullptr)
269 return NO_MEMORY;
270 _do_destroy(item, 1);
271 if (prototype == nullptr) {
272 _do_construct(item, 1);
273 } else {
274 _do_copy(item, prototype, 1);
275 }
276 }
277 return ssize_t(index);
278 }
279
removeItemsAt(size_t index,size_t count)280 ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
281 {
282 size_t end;
283 LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(index, count, &end), "overflow: index=%zu count=%zu",
284 index, count);
285 if (end > size()) return BAD_VALUE;
286 _shrink(index, count);
287 return index;
288 }
289
finish_vector()290 void VectorImpl::finish_vector()
291 {
292 release_storage();
293 mStorage = nullptr;
294 mCount = 0;
295 }
296
clear()297 void VectorImpl::clear()
298 {
299 _shrink(0, mCount);
300 }
301
editItemLocation(size_t index)302 void* VectorImpl::editItemLocation(size_t index)
303 {
304 ALOG_ASSERT(index<capacity(),
305 "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
306 this, (int)index, (int)capacity(), (int)mCount);
307
308 if (index < capacity()) {
309 void* buffer = editArrayImpl();
310 if (buffer) {
311 return reinterpret_cast<char*>(buffer) + index*mItemSize;
312 }
313 }
314 return nullptr;
315 }
316
itemLocation(size_t index) const317 const void* VectorImpl::itemLocation(size_t index) const
318 {
319 ALOG_ASSERT(index<capacity(),
320 "[%p] itemLocation: index=%d, capacity=%d, count=%d",
321 this, (int)index, (int)capacity(), (int)mCount);
322
323 if (index < capacity()) {
324 const void* buffer = arrayImpl();
325 if (buffer) {
326 return reinterpret_cast<const char*>(buffer) + index*mItemSize;
327 }
328 }
329 return nullptr;
330 }
331
setCapacity(size_t new_capacity)332 ssize_t VectorImpl::setCapacity(size_t new_capacity)
333 {
334 // The capacity must always be greater than or equal to the size
335 // of this vector.
336 if (new_capacity <= size()) {
337 return capacity();
338 }
339
340 size_t new_allocation_size = 0;
341 LOG_ALWAYS_FATAL_IF(__builtin_mul_overflow(new_capacity, mItemSize, &new_allocation_size));
342 SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
343 if (sb) {
344 void* array = sb->data();
345 _do_copy(array, mStorage, size());
346 release_storage();
347 mStorage = const_cast<void*>(array);
348 } else {
349 return NO_MEMORY;
350 }
351 return new_capacity;
352 }
353
resize(size_t size)354 ssize_t VectorImpl::resize(size_t size) {
355 ssize_t result = OK;
356 if (size > mCount) {
357 result = insertAt(mCount, size - mCount);
358 } else if (size < mCount) {
359 result = removeItemsAt(size, mCount - size);
360 }
361 return result < 0 ? result : size;
362 }
363
release_storage()364 void VectorImpl::release_storage()
365 {
366 if (mStorage) {
367 const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
368 if (sb->release(SharedBuffer::eKeepStorage) == 1) {
369 _do_destroy(mStorage, mCount);
370 SharedBuffer::dealloc(sb);
371 }
372 }
373 }
374
// Opens an uninitialized gap of 'amount' items at position 'where' (which
// the caller has validated as <= mCount), growing the allocation by
// roughly 1.5x when needed. Returns a pointer to the gap, or nullptr on
// allocation failure. Any overflow in the size arithmetic aborts.
void* VectorImpl::_grow(size_t where, size_t amount)
{
//    ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where <= mCount,
            "[%p] _grow: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(mCount, amount, &new_size), "new_size overflow");

    if (capacity() < new_size) {
        // NOTE: This implementation used to resize vectors as per ((3*x + 1) / 2)
        // (sigh..). Also note, the " + 1" was necessary to handle the special case
        // where x == 1, where the resized_capacity will be equal to the old
        // capacity without the +1. The old calculation wouldn't work properly
        // if x was zero.
        //
        // This approximates the old calculation, using (x + (x/2) + 1) instead.
        size_t new_capacity = 0;
        LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(new_size, (new_size / 2), &new_capacity),
                            "new_capacity overflow");
        LOG_ALWAYS_FATAL_IF(
                __builtin_add_overflow(new_capacity, static_cast<size_t>(1u), &new_capacity),
                "new_capacity overflow");
        new_capacity = max(kMinVectorCapacity, new_capacity);

        size_t new_alloc_size = 0;
        LOG_ALWAYS_FATAL_IF(__builtin_mul_overflow(new_capacity, mItemSize, &new_alloc_size),
                            "new_alloc_size overflow");

//        ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
        if ((mStorage) &&
            (mCount==where) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            // Fast path: appending trivially-copyable/destructible items,
            // so the SharedBuffer can be resized in place.
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_alloc_size);
            if (sb) {
                mStorage = sb->data();
            } else {
                return nullptr;
            }
        } else {
            // General path: allocate a fresh buffer and copy the items on
            // each side of the gap into their new positions.
            SharedBuffer* sb = SharedBuffer::alloc(new_alloc_size);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != mCount) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
                    _do_copy(dest, from, mCount-where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return nullptr;
            }
        }
    } else {
        // Enough capacity already: shift the tail forward to open the gap.
        void* array = editArrayImpl();
        if (where != mCount) {
            const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
            void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_forward(to, from, mCount - where);
        }
    }
    mCount = new_size;
    void* free_space = const_cast<void*>(itemLocation(where));
    return free_space;
}
450
// Removes 'amount' items starting at 'where' (range validated by the
// caller), destroying them and closing the gap. When the new size drops
// below half the capacity, the allocation is shrunk to 2*new_size items
// (>= kMinVectorCapacity); if that reallocation fails, the function
// returns early and keeps the old storage.
void VectorImpl::_shrink(size_t where, size_t amount)
{
    if (!mStorage)
        return;

//    ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where + amount <= mCount,
            "[%p] _shrink: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(__builtin_sub_overflow(mCount, amount, &new_size));

    if (new_size < (capacity() / 2)) {
        // NOTE: (new_size * 2) is safe because capacity didn't overflow and
        // new_size < (capacity / 2)).
        const size_t new_capacity = max(kMinVectorCapacity, new_size * 2);

        // NOTE: (new_capacity * mItemSize), (where * mItemSize) and
        // ((where + amount) * mItemSize) beyond this point are safe because
        // we are always reducing the capacity of the underlying SharedBuffer.
        // In other words, (old_capacity * mItemSize) did not overflow, and
        // where < (where + amount) < new_capacity < old_capacity.
        if ((where == new_size) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            // Fast path: truncating trivially-copyable/destructible items,
            // so the SharedBuffer can be resized in place.
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
            if (sb) {
                mStorage = sb->data();
            } else {
                return;
            }
        } else {
            // General path: copy the surviving items into a smaller buffer.
            // (The removed items are destroyed by release_storage() when the
            // old buffer's last reference goes away.)
            SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != new_size) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
                    _do_copy(dest, from, new_size - where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else{
                return;
            }
        }
    } else {
        // Keeping the allocation: destroy the removed items in place, then
        // slide the tail backward over the gap.
        void* array = editArrayImpl();
        void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
        _do_destroy(to, amount);
        if (where != new_size) {
            const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_backward(to, from, new_size - where);
        }
    }
    mCount = new_size;
}
516
itemSize() const517 size_t VectorImpl::itemSize() const {
518 return mItemSize;
519 }
520
_do_construct(void * storage,size_t num) const521 void VectorImpl::_do_construct(void* storage, size_t num) const
522 {
523 if (!(mFlags & HAS_TRIVIAL_CTOR)) {
524 do_construct(storage, num);
525 }
526 }
527
_do_destroy(void * storage,size_t num) const528 void VectorImpl::_do_destroy(void* storage, size_t num) const
529 {
530 if (!(mFlags & HAS_TRIVIAL_DTOR)) {
531 do_destroy(storage, num);
532 }
533 }
534
_do_copy(void * dest,const void * from,size_t num) const535 void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
536 {
537 if (!(mFlags & HAS_TRIVIAL_COPY)) {
538 do_copy(dest, from, num);
539 } else {
540 memcpy(dest, from, num*itemSize());
541 }
542 }
543
_do_splat(void * dest,const void * item,size_t num) const544 void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
545 do_splat(dest, item, num);
546 }
547
_do_move_forward(void * dest,const void * from,size_t num) const548 void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
549 do_move_forward(dest, from, num);
550 }
551
_do_move_backward(void * dest,const void * from,size_t num) const552 void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
553 do_move_backward(dest, from, num);
554 }
555
556 /*****************************************************************************/
557
SortedVectorImpl(size_t itemSize,uint32_t flags)558 SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
559 : VectorImpl(itemSize, flags)
560 {
561 }
562
SortedVectorImpl(const VectorImpl & rhs)563 SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
564 : VectorImpl(rhs)
565 {
566 }
567
~SortedVectorImpl()568 SortedVectorImpl::~SortedVectorImpl()
569 {
570 }
571
// Delegates to the base-class assignment (refcounted storage sharing).
SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
{
    VectorImpl::operator = (static_cast<const VectorImpl&>(rhs));
    return *this;
}
576
indexOf(const void * item) const577 ssize_t SortedVectorImpl::indexOf(const void* item) const
578 {
579 return _indexOrderOf(item);
580 }
581
orderOf(const void * item) const582 size_t SortedVectorImpl::orderOf(const void* item) const
583 {
584 size_t o;
585 _indexOrderOf(item, &o);
586 return o;
587 }
588
// Binary-searches the sorted storage for 'item' using do_compare().
// Returns the index of a matching element, or NAME_NOT_FOUND. If 'order'
// is non-null it receives the insertion index that would keep the vector
// sorted (set in both the found and not-found cases).
ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
{
    if (order) *order = 0;
    if (isEmpty()) {
        return NAME_NOT_FOUND;
    }
    // binary search
    ssize_t err = NAME_NOT_FOUND;
    ssize_t l = 0;
    ssize_t h = size()-1;
    ssize_t mid;
    const void* a = arrayImpl();
    const size_t s = itemSize();
    while (l <= h) {
        mid = l + (h - l)/2;  // midpoint without (l + h) overflow
        const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
        const int c = do_compare(curr, item);
        if (c == 0) {
            // Found: 'l' doubles as the reported insertion order below.
            err = l = mid;
            break;
        } else if (c < 0) {
            l = mid + 1;
        } else {
            h = mid - 1;
        }
    }
    if (order) *order = l;
    return err;
}
618
add(const void * item)619 ssize_t SortedVectorImpl::add(const void* item)
620 {
621 size_t order;
622 ssize_t index = _indexOrderOf(item, &order);
623 if (index < 0) {
624 index = VectorImpl::insertAt(item, order, 1);
625 } else {
626 index = VectorImpl::replaceAt(item, index);
627 }
628 return index;
629 }
630
merge(const VectorImpl & vector)631 ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
632 {
633 // naive merge...
634 if (!vector.isEmpty()) {
635 const void* buffer = vector.arrayImpl();
636 const size_t is = itemSize();
637 size_t s = vector.size();
638 for (size_t i=0 ; i<s ; i++) {
639 ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
640 if (err<0) {
641 return err;
642 }
643 }
644 }
645 return OK;
646 }
647
merge(const SortedVectorImpl & vector)648 ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
649 {
650 // we've merging a sorted vector... nice!
651 ssize_t err = OK;
652 if (!vector.isEmpty()) {
653 // first take care of the case where the vectors are sorted together
654 if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
655 err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
656 } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
657 err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
658 } else {
659 // this could be made a little better
660 err = merge(static_cast<const VectorImpl&>(vector));
661 }
662 }
663 return err;
664 }
665
remove(const void * item)666 ssize_t SortedVectorImpl::remove(const void* item)
667 {
668 ssize_t i = indexOf(item);
669 if (i>=0) {
670 VectorImpl::removeItemsAt(i, 1);
671 }
672 return i;
673 }
674
675 /*****************************************************************************/
676
677 }; // namespace android
678