• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <algorithm>
17 #include <atomic>
18 
19 #include "libpandabase/macros.h"
20 #include "runtime/include/runtime.h"
21 #include "runtime/include/runtime_options.h"
22 #include "runtime/include/panda_vm.h"
23 #include "runtime/include/gc_task.h"
24 #include "runtime/mem/gc/gc_trigger.h"
25 #include "utils/logger.h"
26 
27 namespace ark::mem {
28 
29 static constexpr size_t PERCENT_100 = 100;
30 
GCTriggerConfig(const RuntimeOptions & options,panda_file::SourceLang lang)31 GCTriggerConfig::GCTriggerConfig(const RuntimeOptions &options, panda_file::SourceLang lang)
32 {
33     auto runtimeLang = plugins::LangToRuntimeType(lang);
34     gcTriggerType_ = options.GetGcTriggerType(runtimeLang);
35     debugStart_ = options.GetGcDebugTriggerStart(runtimeLang);
36     percentThreshold_ = std::min(options.GetGcTriggerPercentThreshold(), PERCENT_100_U32);
37     adaptiveMultiplier_ = options.GetGcTriggerAdaptiveMultiplier();
38     minExtraHeapSize_ = options.GetMinExtraHeapSize();
39     maxExtraHeapSize_ = options.GetMaxExtraHeapSize();
40     maxTriggerPercent_ = std::min(options.GetMaxTriggerPercent(), PERCENT_100_U32);
41     skipStartupGcCount_ = options.GetSkipStartupGcCount(runtimeLang);
42     useNthAllocTrigger_ = options.IsGcUseNthAllocTrigger();
43 
44     if (options.IsRunGcEverySafepoint()) {
45         ASSERT_PRINT(gcTriggerType_ == "debug",
46                      "Option 'run-gc-every-safepoint' must be used with 'gc-trigger-type=debug'");
47         ASSERT_PRINT(options.IsRunGcInPlace(runtimeLang),
48                      "Option 'run-gc-every-safepoint' must be used with 'run-gc-in-place'");
49     }
50 }
51 
/// Minimal constructor (used for the test trigger type): keeps the default
/// thresholds and the default-initialized target footprint.
GCTriggerHeap::GCTriggerHeap(MemStatsType *memStats, HeapSpace *heapSpace) : heapSpace_(heapSpace), memStats_(memStats)
{
}
55 
GCTriggerHeap(MemStatsType * memStats,HeapSpace * heapSpace,size_t minHeapSize,uint8_t percentThreshold,size_t minExtraSize,size_t maxExtraSize,uint32_t skipGcTimes)56 GCTriggerHeap::GCTriggerHeap(MemStatsType *memStats, HeapSpace *heapSpace, size_t minHeapSize, uint8_t percentThreshold,
57                              size_t minExtraSize, size_t maxExtraSize, uint32_t skipGcTimes)
58     : heapSpace_(heapSpace), memStats_(memStats), skipGcCount_(skipGcTimes)
59 {
60     percentThreshold_ = percentThreshold;
61     minExtraSize_ = minExtraSize;
62     maxExtraSize_ = maxExtraSize;
63     // If we have min_heap_size < 100, we get false positives in TriggerGcIfNeeded, since we divide by 100 first
64     ASSERT(minHeapSize >= 100);
65     // Atomic with relaxed order reason: data race with target_footprint_ with no synchronization or ordering
66     // constraints imposed on other reads or writes
67     targetFootprint_.store((minHeapSize / PERCENT_100) * percentThreshold_, std::memory_order_relaxed);
68     LOG(DEBUG, GC_TRIGGER) << "GCTriggerHeap created, min heap size " << minHeapSize << ", percent threshold "
69                            << percentThreshold << ", min_extra_size " << minExtraSize << ", max_extra_size "
70                            << maxExtraSize;
71 }
72 
SetMinTargetFootprint(size_t targetSize)73 void GCTriggerHeap::SetMinTargetFootprint(size_t targetSize)
74 {
75     LOG(DEBUG, GC_TRIGGER) << "SetTempTargetFootprint target_footprint = " << targetSize;
76     minTargetFootprint_ = targetSize;
77     // Atomic with relaxed order reason: data race with target_footprint_ with no synchronization or ordering
78     // constraints imposed on other reads or writes
79     targetFootprint_.store(targetSize, std::memory_order_relaxed);
80 }
81 
/// Resets the minimum footprint back to its default value.
/// NOTE(review): only minTargetFootprint_ is restored here; targetFootprint_ keeps the value
/// installed by SetMinTargetFootprint until the next ComputeNewTargetFootprint — confirm intended.
void GCTriggerHeap::RestoreMinTargetFootprint()
{
    minTargetFootprint_ = DEFAULT_MIN_TARGET_FOOTPRINT;
}
86 
ComputeNewTargetFootprint(const GCTask & task,size_t heapSizeBeforeGc,size_t heapSize)87 void GCTriggerHeap::ComputeNewTargetFootprint(const GCTask &task, size_t heapSizeBeforeGc, size_t heapSize)
88 {
89     GC *gc = Thread::GetCurrent()->GetVM()->GetGC();
90     if (gc->IsGenerational() && task.reason == GCTaskCause::YOUNG_GC_CAUSE &&
91         task.collectionType != GCCollectionType::MIXED) {
92         // we don't want to update heap-trigger on young-gc
93         return;
94     }
95 
96     size_t target = this->ComputeTarget(heapSizeBeforeGc, heapSize);
97 
98     // Atomic with relaxed order reason: data race with target_footprint_ with no synchronization or ordering
99     // constraints imposed on other reads or writes
100     targetFootprint_.store(target, std::memory_order_relaxed);
101 
102     LOG(DEBUG, GC_TRIGGER) << "ComputeNewTargetFootprint target_footprint = " << target;
103 }
104 
ComputeTarget(size_t heapSizeBeforeGc,size_t heapSize)105 size_t GCTriggerHeap::ComputeTarget(size_t heapSizeBeforeGc, size_t heapSize)
106 {
107     // Note: divide by 100 first to avoid overflow
108     size_t delta = (heapSize / PERCENT_100) * percentThreshold_;
109 
110     // heap increased corresponding with previous gc
111     if (heapSize > heapSizeBeforeGc) {
112         delta = std::min(delta, maxExtraSize_);
113     } else {
114         // if heap was squeeze from 200mb to 100mb we want to set a target to 150mb, not just 100mb*percent_threshold_
115         delta = std::max(delta, (heapSizeBeforeGc - heapSize) / 2);
116     }
117     return heapSize + std::max(delta, minExtraSize_);
118 }
119 
TriggerGcIfNeeded(GC * gc)120 void GCTriggerHeap::TriggerGcIfNeeded(GC *gc)
121 {
122     if (skipGcCount_ > 0) {
123         skipGcCount_--;
124         return;
125     }
126     size_t bytesInHeap = memStats_->GetFootprintHeap();
127     // Atomic with relaxed order reason: data race with target_footprint_ with no synchronization or ordering
128     // constraints imposed on other reads or writes
129     if (UNLIKELY(bytesInHeap >= targetFootprint_.load(std::memory_order_relaxed))) {
130         LOG(DEBUG, GC_TRIGGER) << "GCTriggerHeap triggered";
131         ASSERT(gc != nullptr);
132         gc->PendingGC();
133         auto task = MakePandaUnique<GCTask>(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE, time::GetCurrentTimeInNanos());
134         gc->Trigger(std::move(task));
135     }
136 }
137 
/// Adaptive variant of the heap trigger: in addition to the base thresholds it
/// tracks recent targets so ComputeTarget can escape a "stuck" threshold window.
/// @param adaptiveMultiplier multiplier applied to maxExtraSize when recent targets cluster
GCAdaptiveTriggerHeap::GCAdaptiveTriggerHeap(MemStatsType *memStats, HeapSpace *heapSpace, size_t minHeapSize,
                                             uint8_t percentThreshold, uint32_t adaptiveMultiplier, size_t minExtraSize,
                                             size_t maxExtraSize, uint32_t skipGcTimes)
    : GCTriggerHeap(memStats, heapSpace, minHeapSize, percentThreshold, minExtraSize, maxExtraSize, skipGcTimes),
      adaptiveMultiplier_(adaptiveMultiplier)
{
    // Seed the history with the initial target computed by the base-class constructor.
    // Atomic with relaxed order reason: data race with target_footprint_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    recentTargetThresholds_.push_back(targetFootprint_.load(std::memory_order_relaxed));
}
148 
ComputeTarget(size_t heapSizeBeforeGc,size_t heapSize)149 size_t GCAdaptiveTriggerHeap::ComputeTarget(size_t heapSizeBeforeGc, size_t heapSize)
150 {
151     auto delta = static_cast<size_t>(static_cast<double>(heapSize) / PERCENT_100_D * percentThreshold_);
152 
153     const auto [min_threshold, max_threshold] =
154         std::minmax_element(recentTargetThresholds_.begin(), recentTargetThresholds_.end());
155     size_t window = *max_threshold - *min_threshold;
156 
157     // if recent thresholds localize in "small" window then we need to get out from a location to avoid too many trigger
158     if (window <= maxExtraSize_) {
159         delta = std::max(delta, adaptiveMultiplier_ * maxExtraSize_);
160         delta = std::min(delta, heapSize);
161     } else if (heapSize > heapSizeBeforeGc) {  // heap increased corresponding with previous gc
162         delta = std::min(delta, maxExtraSize_);
163     } else {
164         // if heap was squeeze from 200mb to 100mb we want to set a target to 150mb, not just 100mb*percent_threshold_
165         delta = std::max(delta, (heapSizeBeforeGc - heapSize) / 2);
166     }
167     delta = std::max(delta, minExtraSize_);
168     size_t target = heapSize + delta;
169 
170     recentTargetThresholds_.push_back(target);
171 
172     return target;
173 }
174 
GetTriggerType(std::string_view gcTriggerType)175 GCTriggerType GetTriggerType(std::string_view gcTriggerType)
176 {
177     auto triggerType = GCTriggerType::INVALID_TRIGGER;
178     if (gcTriggerType == "heap-trigger-test") {
179         triggerType = GCTriggerType::HEAP_TRIGGER_TEST;
180     } else if (gcTriggerType == "heap-trigger") {
181         triggerType = GCTriggerType::HEAP_TRIGGER;
182     } else if (gcTriggerType == "adaptive-heap-trigger") {
183         triggerType = GCTriggerType::ADAPTIVE_HEAP_TRIGGER;
184     } else if (gcTriggerType == "trigger-heap-occupancy") {
185         triggerType = GCTriggerType::TRIGGER_HEAP_OCCUPANCY;
186     } else if (gcTriggerType == "debug") {
187         triggerType = GCTriggerType::DEBUG;
188     } else if (gcTriggerType == "no-gc-for-start-up") {
189         triggerType = GCTriggerType::NO_GC_FOR_START_UP;
190     } else if (gcTriggerType == "debug-never") {
191         triggerType = GCTriggerType::DEBUG_NEVER;
192     } else if (gcTriggerType == "pause-time-goal-trigger") {
193         triggerType = GCTriggerType::PAUSE_TIME_GOAL_TRIGGER;
194     }
195     return triggerType;
196 }
197 
/// Factory for the GC trigger selected by @p config.
/// All triggers are allocated through @p allocator; an unknown trigger type aborts with LOG(FATAL).
/// If the nth-alloc option is set, the created trigger is wrapped in a SchedGCOnNthAllocTrigger,
/// which takes ownership of the wrapped trigger (deleted in its destructor).
GCTrigger *CreateGCTrigger(MemStatsType *memStats, HeapSpace *heapSpace, const GCTriggerConfig &config,
                           InternalAllocatorPtr allocator)
{
    uint32_t skipGcTimes = config.GetSkipStartupGcCount();

    constexpr size_t DEFAULT_HEAP_SIZE = 8_MB;
    auto triggerType = GetTriggerType(config.GetGCTriggerType());

    GCTrigger *ret {nullptr};
    switch (triggerType) {  // NOLINT(hicpp-multiway-paths-covered)
        case GCTriggerType::HEAP_TRIGGER_TEST:
            // NOTE(dtrubenkov): replace with permanent allocator when we get it
            ret = allocator->New<GCTriggerHeap>(memStats, heapSpace);
            break;
        case GCTriggerType::HEAP_TRIGGER:
            ret = allocator->New<GCTriggerHeap>(memStats, heapSpace, DEFAULT_HEAP_SIZE, config.GetPercentThreshold(),
                                                config.GetMinExtraHeapSize(), config.GetMaxExtraHeapSize());
            break;
        case GCTriggerType::ADAPTIVE_HEAP_TRIGGER:
            ret = allocator->New<GCAdaptiveTriggerHeap>(memStats, heapSpace, DEFAULT_HEAP_SIZE,
                                                        config.GetPercentThreshold(), config.GetAdaptiveMultiplier(),
                                                        config.GetMinExtraHeapSize(), config.GetMaxExtraHeapSize());
            break;
        case GCTriggerType::NO_GC_FOR_START_UP:
            // Same as HEAP_TRIGGER but skips the first skipGcTimes trigger checks
            ret =
                allocator->New<GCTriggerHeap>(memStats, heapSpace, DEFAULT_HEAP_SIZE, config.GetPercentThreshold(),
                                              config.GetMinExtraHeapSize(), config.GetMaxExtraHeapSize(), skipGcTimes);
            break;
        case GCTriggerType::TRIGGER_HEAP_OCCUPANCY:
            ret = allocator->New<GCTriggerHeapOccupancy>(heapSpace, config.GetMaxTriggerPercent());
            break;
        case GCTriggerType::DEBUG:
            ret = allocator->New<GCTriggerDebug>(config.GetDebugStart(), heapSpace);
            break;
        case GCTriggerType::DEBUG_NEVER:
            ret = allocator->New<GCNeverTrigger>();
            break;
        case GCTriggerType::PAUSE_TIME_GOAL_TRIGGER:
            ret = allocator->New<PauseTimeGoalTrigger>(memStats, DEFAULT_HEAP_SIZE, config.GetPercentThreshold(),
                                                       config.GetMinExtraHeapSize(), config.GetMaxExtraHeapSize());
            break;
        default:
            LOG(FATAL, GC) << "Wrong GCTrigger type";
            break;
    }
    if (config.IsUseNthAllocTrigger()) {
        ret = allocator->New<SchedGCOnNthAllocTrigger>(ret);
    }
    return ret;
}
248 
/// Marks the heap space as being collected for the duration of the GC.
void GCTriggerHeap::GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize)
{
    heapSpace_->SetIsWorkGC(true);
}
253 
/// Updates the trigger threshold and lets the heap space recompute its size
/// once a collection completes.
void GCTriggerHeap::GCFinished(const GCTask &task, size_t heapSizeBeforeGc, size_t heapSize)
{
    this->ComputeNewTargetFootprint(task, heapSizeBeforeGc, heapSize);
    heapSpace_->ComputeNewSize();
}
259 
/// Debug trigger: fires a GC on every trigger check once the check counter
/// reaches @p debugStart.
GCTriggerDebug::GCTriggerDebug(uint64_t debugStart, HeapSpace *heapSpace)
    : heapSpace_(heapSpace), debugStart_(debugStart)
{
    LOG(DEBUG, GC_TRIGGER) << "GCTriggerDebug created";
}
265 
/// Counts trigger checks and fires a GC on every check from the debugStart_'th onward.
void GCTriggerDebug::TriggerGcIfNeeded(GC *gc)
{
    // NOTE(review): function-local static, so the counter is shared by ALL GCTriggerDebug
    // instances and never resets — confirm this is intended for the debug-only trigger.
    static std::atomic<uint64_t> counter = 0;
    LOG(DEBUG, GC_TRIGGER) << "GCTriggerDebug counter " << counter;
    if (counter >= debugStart_) {
        LOG(DEBUG, GC_TRIGGER) << "GCTriggerDebug triggered";
        auto task = MakePandaUnique<GCTask>(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE, time::GetCurrentTimeInNanos());
        gc->Trigger(std::move(task));
    }
    // Incremented after the check, so the trigger fires starting with call number debugStart_
    counter++;
}
277 
/// Marks the heap space as being collected for the duration of the GC.
void GCTriggerDebug::GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize)
{
    heapSpace_->SetIsWorkGC(true);
}
282 
/// Lets the heap space recompute its size after a collection; the debug trigger
/// itself keeps no per-collection state.
void GCTriggerDebug::GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSizeBeforeGc,
                                [[maybe_unused]] size_t heapSize)
{
    heapSpace_->ComputeNewSize();
}
288 
/// Occupancy trigger: fires when the heap grows past a fraction of the max heap size.
/// @param maxTriggerPercent percentage (0-100), stored as a fraction in [0, 1]
GCTriggerHeapOccupancy::GCTriggerHeapOccupancy(HeapSpace *heapSpace, uint32_t maxTriggerPercent)
    : heapSpace_(heapSpace), maxTriggerPercent_(maxTriggerPercent / PERCENT_100_D)
{
    LOG(DEBUG, GC_TRIGGER) << "GCTriggerHeapOccupancy created";
}
294 
/// Marks the heap space as being collected for the duration of the GC.
void GCTriggerHeapOccupancy::GCStarted([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSize)
{
    heapSpace_->SetIsWorkGC(true);
}
299 
/// Lets the heap space recompute its size after a collection; the occupancy
/// trigger itself keeps no per-collection state.
void GCTriggerHeapOccupancy::GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heapSizeBeforeGc,
                                        [[maybe_unused]] size_t heapSize)
{
    heapSpace_->ComputeNewSize();
}
305 
TriggerGcIfNeeded(GC * gc)306 void GCTriggerHeapOccupancy::TriggerGcIfNeeded(GC *gc)
307 {
308     size_t currentHeapSize = heapSpace_->GetHeapSize();
309     size_t minHeapSize = MemConfig::GetInitialHeapSizeLimit();
310     size_t maxHeapSize = MemConfig::GetHeapSizeLimit();
311     size_t threshold = std::min(minHeapSize, static_cast<size_t>(maxTriggerPercent_ * maxHeapSize));
312     if (currentHeapSize > threshold) {
313         LOG(DEBUG, GC_TRIGGER) << "GCTriggerHeapOccupancy triggered: current heap size = " << currentHeapSize
314                                << ", threshold = " << threshold;
315         auto task = MakePandaUnique<GCTask>(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE, time::GetCurrentTimeInNanos());
316         gc->Trigger(std::move(task));
317     }
318 }
319 
SchedGCOnNthAllocTrigger(GCTrigger * origin)320 SchedGCOnNthAllocTrigger::SchedGCOnNthAllocTrigger(GCTrigger *origin) : origin_(origin) {}
321 
/// Releases the wrapped trigger, which this decorator owns (see CreateGCTrigger).
SchedGCOnNthAllocTrigger::~SchedGCOnNthAllocTrigger()
{
    Runtime::GetCurrent()->GetInternalAllocator()->Delete(origin_);
}
326 
/// Decrements the scheduled-allocation countdown atomically; the thread whose
/// decrement brings the counter to zero runs the scheduled GC in managed code,
/// every other call is delegated to the wrapped trigger.
void SchedGCOnNthAllocTrigger::TriggerGcIfNeeded(GC *gc)
{
    // Atomic with relaxed order reason: data race with other mutators
    uint32_t value = counter_.load(std::memory_order_relaxed);
    bool trigger = false;
    while (value > 0) {
        if (counter_.compare_exchange_strong(value, value - 1, std::memory_order_release, std::memory_order_relaxed)) {
            // On success 'value' still holds the pre-decrement count; mirror the decrement locally
            // so 'trigger' is true exactly for the thread that moved the counter from 1 to 0.
            --value;
            trigger = (value == 0);
            break;
        }
        // Atomic with relaxed order reason: data race with other mutators
        // NOTE(review): compare_exchange already refreshes 'value' on failure, so this reload
        // is redundant (but harmless) — kept as-is.
        value = counter_.load(std::memory_order_relaxed);
    }
    if (trigger) {
        auto task = MakePandaUnique<GCTask>(cause_);
        gc->WaitForGCInManaged(*task);
        isTriggered_ = true;
    } else {
        origin_->TriggerGcIfNeeded(gc);
    }
}
349 
/// Arms the trigger: after @p counter further allocations (TriggerGcIfNeeded calls),
/// a GC with @p cause is run. Resets the fired flag before publishing the counter.
void SchedGCOnNthAllocTrigger::ScheduleGc(GCTaskCause cause, uint32_t counter)
{
    isTriggered_ = false;
    cause_ = cause;
    counter_ = counter;
}
356 
PauseTimeGoalTrigger(MemStatsType * memStats,size_t minHeapSize,uint8_t percentThreshold,size_t minExtraSize,size_t maxExtraSize)357 PauseTimeGoalTrigger::PauseTimeGoalTrigger(MemStatsType *memStats, size_t minHeapSize, uint8_t percentThreshold,
358                                            size_t minExtraSize, size_t maxExtraSize)
359     : memStats_(memStats),
360       percentThreshold_(percentThreshold),
361       minExtraSize_(minExtraSize),
362       maxExtraSize_(maxExtraSize),
363       targetFootprint_(minHeapSize * percentThreshold / PERCENT_100)
364 {
365 }
366 
/// Starts a concurrent-marking GC if GCFinished previously armed startConcurrentMarking_.
/// The flag is consumed with a CAS so only one caller starts the collection; if the GC
/// rejects the trigger, the flag is re-armed for a later attempt.
void PauseTimeGoalTrigger::TriggerGcIfNeeded(GC *gc)
{
    bool expectedStartConcurrent = true;
    // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed on other
    // reads or writes
    // NOTE(review): compare_exchange_weak may fail spuriously; the flag then stays set and the
    // trigger simply fires on a later call, so no retry loop is needed here.
    auto startConcurrentMarking = startConcurrentMarking_.compare_exchange_weak(
        expectedStartConcurrent, false, std::memory_order_relaxed, std::memory_order_relaxed);
    if (UNLIKELY(startConcurrentMarking)) {
        LOG(DEBUG, GC_TRIGGER) << "PauseTimeGoalTrigger triggered";
        ASSERT(gc != nullptr);
        gc->PendingGC();
        auto result = gc->Trigger(
            MakePandaUnique<GCTask>(GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE, time::GetCurrentTimeInNanos()));
        if (!result) {
            // Trigger was rejected: re-arm the flag so a later call retries.
            // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
            // on other reads or writes
            startConcurrentMarking_.store(true, std::memory_order_relaxed);
        }
    }
}
387 
/// Post-collection bookkeeping for young/mixed collections:
/// - after a young GC, arms concurrent marking if the footprint crossed the target;
/// - always asks the GC to recompute heap sizes;
/// - after a mixed GC, recomputes the target footprint.
/// Collections this trigger started itself (HEAP_USAGE_THRESHOLD_CAUSE) are ignored.
void PauseTimeGoalTrigger::GCFinished(const GCTask &task, size_t heapSizeBeforeGc, size_t heapSize)
{
    if (task.reason == GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE) {
        return;
    }

    if (task.collectionType != GCCollectionType::YOUNG && task.collectionType != GCCollectionType::MIXED) {
        return;
    }

    if (task.collectionType == GCCollectionType::YOUNG) {
        auto bytesInHeap = memStats_->GetFootprintHeap();
        if (bytesInHeap >= GetTargetFootprint()) {
            // Footprint crossed the target: arm concurrent marking for the next trigger check.
            // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
            // on other reads or writes
            startConcurrentMarking_.store(true, std::memory_order_relaxed);
        }
    }

    auto *gc = Thread::GetCurrent()->GetVM()->GetGC();
    gc->ComputeNewSize();

    if (task.collectionType == GCCollectionType::MIXED) {
        // Atomic with relaxed order reason: data race with no synchronization or ordering constraints imposed
        // on other reads or writes
        targetFootprint_.store(ComputeTarget(heapSizeBeforeGc, heapSize), std::memory_order_relaxed);
    }
}
416 
// Currently it is a copy of GCTriggerHeap::ComputeTarget. #11945
/// Computes the next trigger threshold: heap size plus a percent-based delta,
/// clamped by maxExtraSize_ when the heap grew, or at least half of the freed
/// amount when it shrank, and never less than minExtraSize_.
size_t PauseTimeGoalTrigger::ComputeTarget(size_t heapSizeBeforeGc, size_t heapSize)
{
    // Note: divide by 100 first to avoid overflow
    size_t delta = (heapSize / PERCENT_100) * percentThreshold_;

    // heap increased corresponding with previous gc
    if (heapSize > heapSizeBeforeGc) {
        delta = std::min(delta, maxExtraSize_);
    } else {
        // if heap was squeeze from 200mb to 100mb we want to set a target to 150mb, not just 100mb*percent_threshold_
        delta = std::max(delta, (heapSizeBeforeGc - heapSize) / 2);
    }
    return heapSize + std::max(delta, minExtraSize_);
}
432 }  // namespace ark::mem
433