/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Vector"

#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#include <cutils/log.h>
#include <safe_iop.h>

#include <utils/Errors.h>
#include <utils/SharedBuffer.h>
#include <utils/VectorImpl.h>

/*****************************************************************************/


namespace android {

// ----------------------------------------------------------------------------

const size_t kMinVectorCapacity = 4;

static inline size_t max(size_t a, size_t b) {
    return a>b ? a : b;
}

// ----------------------------------------------------------------------------

VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
    : mStorage(0), mCount(0), mFlags(flags), mItemSize(itemSize)
{
}

VectorImpl::VectorImpl(const VectorImpl& rhs)
    : mStorage(rhs.mStorage), mCount(rhs.mCount),
      mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
{
    if (mStorage) {
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
}

VectorImpl::~VectorImpl()
{
    ALOGW_IF(mCount,
        "[%p] subclasses of VectorImpl must call finish_vector()"
        " in their destructor. Leaking %d bytes.",
        this, (int)(mCount*mItemSize));
    // We can't call _do_destroy() here because the vtable is already gone.
}

VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
        "Vector<> objects have different item types (this=%p, rhs=%p)", this, &rhs);
    if (this != &rhs) {
        release_storage();
        if (rhs.mCount) {
            mStorage = rhs.mStorage;
            mCount = rhs.mCount;
            SharedBuffer::bufferFromData(mStorage)->acquire();
        } else {
            mStorage = 0;
            mCount = 0;
        }
    }
    return *this;
}

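// Returns storage that is safe to write to. VectorImpl shares its backing
// SharedBuffer between copies, so before handing out a mutable pointer we
// must be the sole owner of the buffer; otherwise it is cloned first
// (copy-on-write). A rough picture of the intended use (illustrative only,
// not a call sequence mandated by this file):
//
//     void* a = editArrayImpl();          // may deep-copy a shared buffer
//     if (a) {
//         // 'a' now points to storage referenced only by this vector,
//         // so writing mCount items of mItemSize bytes here is safe.
//     }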
void* VectorImpl::editArrayImpl()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        SharedBuffer* editable = sb->attemptEdit();
        if (editable == 0) {
            // If we're here, we're not the only owner of the buffer.
            // We must make a copy of it.
            editable = SharedBuffer::alloc(sb->size());
            // Fail instead of returning a pointer to storage that's not
            // editable. Otherwise we'd be editing the contents of a buffer
            // for which we're not the only owner, which is undefined behaviour.
            LOG_ALWAYS_FATAL_IF(editable == NULL);
            _do_copy(editable->data(), mStorage, mCount);
            release_storage();
            mStorage = editable->data();
        }
    }
    return mStorage;
}

size_t VectorImpl::capacity() const
{
    if (mStorage) {
        return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
    }
    return 0;
}

ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
{
    return insertArrayAt(vector.arrayImpl(), index, vector.size());
}

ssize_t VectorImpl::appendVector(const VectorImpl& vector)
{
    return insertVectorAt(vector, size());
}

ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, length);
    if (where) {
        _do_copy(where, array, length);
    }
    return where ? index : (ssize_t)NO_MEMORY;
}

ssize_t VectorImpl::appendArray(const void* array, size_t length)
{
    return insertArrayAt(array, size(), length);
}

ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
{
    return insertAt(0, index, numItems);
}

ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, numItems);
    if (where) {
        if (item) {
            _do_splat(where, item, numItems);
        } else {
            _do_construct(where, numItems);
        }
    }
    return where ? index : (ssize_t)NO_MEMORY;
}

static int sortProxy(const void* lhs, const void* rhs, void* func)
{
    return (*(VectorImpl::compar_t)func)(lhs, rhs);
}

status_t VectorImpl::sort(VectorImpl::compar_t cmp)
{
    return sort(sortProxy, (void*)cmp);
}

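// Stable in-place insertion sort over the raw item storage. The array is
// edited lazily: editArrayImpl() (which may copy a shared buffer) and the
// single-item scratch buffer are only set up the first time an out-of-order
// pair is found, so sorting an already-sorted vector never forces a copy or
// an allocation.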
status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
{
    // The sort must be stable. We use insertion sort, which is well suited
    // for small and already-sorted arrays; for big arrays, mergesort could
    // be a better choice.
    const ssize_t count = size();
    if (count > 1) {
        void* array = const_cast<void*>(arrayImpl());
        void* temp = 0;
        ssize_t i = 1;
        while (i < count) {
            void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
            void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
            if (cmp(curr, item, state) > 0) {

                if (!temp) {
                    // we're going to have to modify the array...
                    array = editArrayImpl();
                    if (!array) return NO_MEMORY;
                    temp = malloc(mItemSize);
                    if (!temp) return NO_MEMORY;
                    item = reinterpret_cast<char*>(array) + mItemSize*(i);
                    curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
                } else {
                    _do_destroy(temp, 1);
                }

                _do_copy(temp, item, 1);

                ssize_t j = i-1;
                void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
                do {
                    _do_destroy(next, 1);
                    _do_copy(next, curr, 1);
                    next = curr;
                    --j;
                    curr = reinterpret_cast<char*>(array) + mItemSize*(j);
                } while (j>=0 && (cmp(curr, temp, state) > 0));

                _do_destroy(next, 1);
                _do_copy(next, temp, 1);
            }
            i++;
        }

        if (temp) {
            _do_destroy(temp, 1);
            free(temp);
        }
    }
    return NO_ERROR;
}

void VectorImpl::pop()
{
    if (size())
        removeItemsAt(size()-1, 1);
}

void VectorImpl::push()
{
    push(0);
}

void VectorImpl::push(const void* item)
{
    insertAt(item, size());
}

ssize_t VectorImpl::add()
{
    return add(0);
}

ssize_t VectorImpl::add(const void* item)
{
    return insertAt(item, size());
}

ssize_t VectorImpl::replaceAt(size_t index)
{
    return replaceAt(0, index);
}

ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
{
    ALOG_ASSERT(index<size(),
        "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());

    if (index >= size()) {
        return BAD_INDEX;
    }

    void* item = editItemLocation(index);
    if (item != prototype) {
        if (item == 0)
            return NO_MEMORY;
        _do_destroy(item, 1);
        if (prototype == 0) {
            _do_construct(item, 1);
        } else {
            _do_copy(item, prototype, 1);
        }
    }
    return ssize_t(index);
}

ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
{
    ALOG_ASSERT((index+count)<=size(),
        "[%p] remove: index=%d, count=%d, size=%d",
        this, (int)index, (int)count, (int)size());

    if ((index+count) > size())
        return BAD_VALUE;
    _shrink(index, count);
    return index;
}

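// Releases the storage and resets the vector to empty. The typed subclasses
// (e.g. the Vector<T> wrapper declared in utils/Vector.h) are expected to
// call this from their own destructors: by the time ~VectorImpl() runs, the
// derived vtable entries for do_destroy() and friends are gone, so element
// destruction has to happen while the subclass is still alive.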
void VectorImpl::finish_vector()
{
    release_storage();
    mStorage = 0;
    mCount = 0;
}

void VectorImpl::clear()
{
    _shrink(0, mCount);
}

void* VectorImpl::editItemLocation(size_t index)
{
    ALOG_ASSERT(index<capacity(),
        "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);

    if (index < capacity()) {
        void* buffer = editArrayImpl();
        if (buffer) {
            return reinterpret_cast<char*>(buffer) + index*mItemSize;
        }
    }
    return 0;
}

const void* VectorImpl::itemLocation(size_t index) const
{
    ALOG_ASSERT(index<capacity(),
        "[%p] itemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);

    if (index < capacity()) {
        const void* buffer = arrayImpl();
        if (buffer) {
            return reinterpret_cast<const char*>(buffer) + index*mItemSize;
        }
    }
    return 0;
}

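// Pre-allocates storage for at least new_capacity items, much like a
// reserve() operation. Requests smaller than or equal to the current size
// are ignored (the current capacity is returned unchanged), so this can
// never drop live elements; shrinking only happens through _shrink().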
ssize_t VectorImpl::setCapacity(size_t new_capacity)
{
    // The capacity must always be greater than or equal to the size
    // of this vector.
    if (new_capacity <= size()) {
        return capacity();
    }

    size_t new_allocation_size = 0;
    LOG_ALWAYS_FATAL_IF(!safe_mul(&new_allocation_size, new_capacity, mItemSize));
    SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
    if (sb) {
        void* array = sb->data();
        _do_copy(array, mStorage, size());
        release_storage();
        mStorage = const_cast<void*>(array);
    } else {
        return NO_MEMORY;
    }
    return new_capacity;
}

ssize_t VectorImpl::resize(size_t size) {
    ssize_t result = NO_ERROR;
    if (size > mCount) {
        result = insertAt(mCount, size - mCount);
    } else if (size < mCount) {
        result = removeItemsAt(size, mCount - size);
    }
    return result < 0 ? result : size;
}

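// Drops this vector's reference to the backing SharedBuffer. Passing
// SharedBuffer::eKeepStorage keeps the raw memory alive even once the last
// reference is gone, so that when we turn out to be the final owner we can
// still run the element destructors on valid storage and only then free the
// buffer with SharedBuffer::dealloc().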
void VectorImpl::release_storage()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        if (sb->release(SharedBuffer::eKeepStorage) == 1) {
            _do_destroy(mStorage, mCount);
            SharedBuffer::dealloc(sb);
        }
    }
}

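// Makes room for 'amount' new items at position 'where' and returns a
// pointer to the gap. When the current capacity is too small, the new
// capacity is new_size + new_size/2 + 1 (clamped to kMinVectorCapacity),
// i.e. roughly 1.5x growth; appending one item at a time to an empty vector
// yields the capacity sequence 4, 8, 14, 23, 37, ... Overflow in any of the
// size computations is fatal rather than silently wrapped.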
void* VectorImpl::_grow(size_t where, size_t amount)
{
//    ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where <= mCount,
            "[%p] _grow: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(!safe_add(&new_size, mCount, amount), "new_size overflow");

    if (capacity() < new_size) {
        // NOTE: This implementation used to resize vectors as per ((3*x + 1) / 2)
        // (sigh..). Also note, the " + 1" was necessary to handle the special case
        // where x == 1, where the resized_capacity will be equal to the old
        // capacity without the +1. The old calculation wouldn't work properly
        // if x was zero.
        //
        // This approximates the old calculation, using (x + (x/2) + 1) instead.
        size_t new_capacity = 0;
        LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_size, (new_size / 2)),
                            "new_capacity overflow");
        LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_capacity, static_cast<size_t>(1u)),
                            "new_capacity overflow");
        new_capacity = max(kMinVectorCapacity, new_capacity);

        size_t new_alloc_size = 0;
        LOG_ALWAYS_FATAL_IF(!safe_mul(&new_alloc_size, new_capacity, mItemSize),
                            "new_alloc_size overflow");

//        ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
        if ((mStorage) &&
            (mCount==where) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_alloc_size);
            if (sb) {
                mStorage = sb->data();
            } else {
                return NULL;
            }
        } else {
            SharedBuffer* sb = SharedBuffer::alloc(new_alloc_size);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != mCount) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
                    _do_copy(dest, from, mCount-where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return NULL;
            }
        }
    } else {
        void* array = editArrayImpl();
        if (where != mCount) {
            const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
            void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_forward(to, from, mCount - where);
        }
    }
    mCount = new_size;
    void* free_space = const_cast<void*>(itemLocation(where));
    return free_space;
}

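// Removes 'amount' items starting at 'where'. When the remaining size drops
// below half of the current capacity, the backing store is shrunk to
// max(kMinVectorCapacity, 2 * new_size) items so memory is given back;
// otherwise the removed slots are destroyed and the tail is moved down in
// place.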
void VectorImpl::_shrink(size_t where, size_t amount)
{
    if (!mStorage)
        return;

//    ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where + amount <= mCount,
            "[%p] _shrink: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(!safe_sub(&new_size, mCount, amount));

    if (new_size < (capacity() / 2)) {
        // NOTE: (new_size * 2) is safe because capacity didn't overflow and
        // new_size < (capacity / 2).
        const size_t new_capacity = max(kMinVectorCapacity, new_size * 2);

        // NOTE: (new_capacity * mItemSize), (where * mItemSize) and
        // ((where + amount) * mItemSize) beyond this point are safe because
        // we are always reducing the capacity of the underlying SharedBuffer.
        // In other words, (old_capacity * mItemSize) did not overflow, and
        // where < (where + amount) < new_capacity < old_capacity.
        if ((where == new_size) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
            if (sb) {
                mStorage = sb->data();
            } else {
                return;
            }
        } else {
            SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != new_size) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
                    _do_copy(dest, from, new_size - where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return;
            }
        }
    } else {
        void* array = editArrayImpl();
        void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
        _do_destroy(to, amount);
        if (where != new_size) {
            const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_backward(to, from, new_size - where);
        }
    }
    mCount = new_size;
}
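
// The _do_*() helpers defined below bridge to the virtual do_*() hooks
// implemented by the typed subclasses. The HAS_TRIVIAL_CTOR/DTOR/COPY flags
// passed at construction let trivially-copyable item types skip the virtual
// calls entirely: construction and destruction become no-ops and copying
// falls back to a plain memcpy().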

size_t VectorImpl::itemSize() const {
    return mItemSize;
}

void VectorImpl::_do_construct(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_CTOR)) {
        do_construct(storage, num);
    }
}

void VectorImpl::_do_destroy(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_DTOR)) {
        do_destroy(storage, num);
    }
}

void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_COPY)) {
        do_copy(dest, from, num);
    } else {
        memcpy(dest, from, num*itemSize());
    }
}

void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
    do_splat(dest, item, num);
}

void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
    do_move_forward(dest, from, num);
}

void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
    do_move_backward(dest, from, num);
}

/*****************************************************************************/

SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
    : VectorImpl(itemSize, flags)
{
}

SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
    : VectorImpl(rhs)
{
}

SortedVectorImpl::~SortedVectorImpl()
{
}

SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
{
    return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
}

ssize_t SortedVectorImpl::indexOf(const void* item) const
{
    return _indexOrderOf(item);
}

size_t SortedVectorImpl::orderOf(const void* item) const
{
    size_t o;
    _indexOrderOf(item, &o);
    return o;
}

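// Binary search over the (already sorted) items using the subclass's
// do_compare() hook. Returns the index of a matching item, or
// NAME_NOT_FOUND if there is none. In either case, when 'order' is non-null
// it receives the position at which 'item' would have to be inserted to
// keep the vector sorted.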
ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
{
    // binary search
    ssize_t err = NAME_NOT_FOUND;
    ssize_t l = 0;
    ssize_t h = size()-1;
    ssize_t mid;
    const void* a = arrayImpl();
    const size_t s = itemSize();
    while (l <= h) {
        mid = l + (h - l)/2;
        const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
        const int c = do_compare(curr, item);
        if (c == 0) {
            err = l = mid;
            break;
        } else if (c < 0) {
            l = mid + 1;
        } else {
            h = mid - 1;
        }
    }
    if (order) *order = l;
    return err;
}

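// Inserts 'item' at its sorted position. If an item that compares equal is
// already present, it is replaced rather than duplicated, giving the sorted
// vector set-like semantics; the return value is the index the item ended
// up at (or an error code).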
ssize_t SortedVectorImpl::add(const void* item)
{
    size_t order;
    ssize_t index = _indexOrderOf(item, &order);
    if (index < 0) {
        index = VectorImpl::insertAt(item, order, 1);
    } else {
        index = VectorImpl::replaceAt(item, index);
    }
    return index;
}

ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
{
    // naive merge...
    if (!vector.isEmpty()) {
        const void* buffer = vector.arrayImpl();
        const size_t is = itemSize();
        size_t s = vector.size();
        for (size_t i=0 ; i<s ; i++) {
            ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
            if (err<0) {
                return err;
            }
        }
    }
    return NO_ERROR;
}

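// Merging another vector that is itself sorted lets us special-case the two
// cheap situations where the ranges do not interleave: if every incoming
// item sorts before our first element, the whole vector is inserted at the
// front in one go; if every incoming item sorts after our last element, it
// is appended in one go. Otherwise we fall back to the item-by-item merge
// above.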
ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
{
    // we're merging a sorted vector... nice!
    ssize_t err = NO_ERROR;
    if (!vector.isEmpty()) {
        // first take care of the case where the vectors are sorted together
        if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
            err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
        } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
            err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
        } else {
            // this could be made a little better
            err = merge(static_cast<const VectorImpl&>(vector));
        }
    }
    return err;
}

ssize_t SortedVectorImpl::remove(const void* item)
{
    ssize_t i = indexOf(item);
    if (i>=0) {
        VectorImpl::removeItemsAt(i, 1);
    }
    return i;
}

/*****************************************************************************/

} // namespace android