// Copyright 2019 Google LLC.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

#include "include/private/base/SkContainers.h"

#include "include/private/base/SkAlign.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkFeatures.h"
#include "include/private/base/SkMalloc.h"
#include "include/private/base/SkTo.h"

#include <algorithm>
#include <cstddef>

#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
#include <malloc/malloc.h>
#elif defined(SK_BUILD_FOR_ANDROID) || defined(SK_BUILD_FOR_UNIX)
#include <malloc.h>
#elif defined(SK_BUILD_FOR_WIN)
#include <malloc.h>
#endif

namespace {
// Return at least as many bytes to keep malloc aligned.
constexpr size_t kMinBytes = alignof(max_align_t);

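// Wrap the allocation in a span that reports its full usable size. Where the platform
// exposes it (malloc_size, malloc_usable_size, _msize), the span covers the entire block
// the allocator actually handed back, which may be larger than the requested size;
// otherwise it falls back to the requested size.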
SkSpan<std::byte> complete_size(void* ptr, size_t size) {
    if (ptr == nullptr) {
        return {};
    }

    size_t completeSize = size;

    // Use the OS specific calls to find the actual capacity.
#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
    // TODO: remove the max, when the chrome implementation of malloc_size doesn't return 0.
    completeSize = std::max(malloc_size(ptr), size);
#elif defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 17
    completeSize = malloc_usable_size(ptr);
    SkASSERT(completeSize >= size);
#elif defined(SK_BUILD_FOR_UNIX)
    completeSize = malloc_usable_size(ptr);
    SkASSERT(completeSize >= size);
#elif defined(SK_BUILD_FOR_WIN)
    completeSize = _msize(ptr);
    SkASSERT(completeSize >= size);
#endif

    return {static_cast<std::byte*>(ptr), completeSize};
}
}  // namespace

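// Allocate raw storage for `capacity` elements of fSizeOfT bytes. When growthFactor > 1
// and capacity > 0, the requested capacity is first inflated by growthFactorCapacity() so
// that repeated growth amortizes. The returned span may cover more than capacity * fSizeOfT
// bytes when the underlying allocator over-allocates; allocation failure is fatal.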
SkSpan<std::byte> SkContainerAllocator::allocate(int capacity, double growthFactor) {
    SkASSERT(capacity >= 0);
    SkASSERT(growthFactor >= 1.0);
    SkASSERT_RELEASE(capacity <= fMaxCapacity);

    if (growthFactor > 1.0 && capacity > 0) {
        capacity = this->growthFactorCapacity(capacity, growthFactor);
    }

    return sk_allocate_throw(capacity * fSizeOfT);
}

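// Round `capacity` up to a multiple of kCapacityMultiple, clamping the result to
// fMaxCapacity so the rounded value never exceeds the allocator's limit.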
size_t SkContainerAllocator::roundUpCapacity(int64_t capacity) const {
    SkASSERT(capacity >= 0);

    // If rounding will not go above fMaxCapacity, return the rounded capacity.
    if (capacity < fMaxCapacity - kCapacityMultiple) {
        return SkAlignTo(capacity, kCapacityMultiple);
    }

    return SkToSizeT(fMaxCapacity);
}

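// Scale `capacity` by `growthFactor`, then round up via roundUpCapacity(). The product is
// computed in double and held in an int64_t rather than a size_t (which may be only 32 bits
// wide) before being clamped to fMaxCapacity.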
size_t SkContainerAllocator::growthFactorCapacity(int capacity, double growthFactor) const {
    SkASSERT(capacity >= 0);
    SkASSERT(growthFactor >= 1.0);
    // Multiply by the growthFactor. Remember this must be done in 64-bit ints and not
    // size_t, because size_t is not the same width on every platform.
    const int64_t capacityGrowth = static_cast<int64_t>(capacity * growthFactor);

    // Notice that for small values of capacity, rounding up will provide most of the growth.
    return this->roundUpCapacity(capacityGrowth);
}


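// Allocate at least `size` bytes (and never fewer than kMinBytes). On failure this returns
// an empty span instead of aborting, so the caller can recover.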
SkSpan<std::byte> sk_allocate_canfail(size_t size) {
    // Make sure to ask for at least the minimum number of bytes.
    const size_t adjustedSize = std::max(size, kMinBytes);
    void* ptr = sk_malloc_canfail(adjustedSize);
    return complete_size(ptr, adjustedSize);
}

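// Allocate at least `size` bytes (and never fewer than kMinBytes); allocation failure is
// fatal rather than returning nullptr. A request of 0 bytes returns an empty span without
// touching the allocator.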
SkSpan<std::byte> sk_allocate_throw(size_t size) {
    if (size == 0) {
        return {};
    }
    // Make sure to ask for at least the minimum number of bytes.
    const size_t adjustedSize = std::max(size, kMinBytes);
    void* ptr = sk_malloc_throw(adjustedSize);
    return complete_size(ptr, adjustedSize);
}

void sk_report_container_overflow_and_die() {
    SK_ABORT("Requested capacity is too large.");
}