// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
#define V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_

#include "include/v8-internal.h"
#include "src/common/globals.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

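// Allocation entry point for background threads. Requests that fit in the
// thread-local LAB (<= kMaxLabObjectSize) take the fast LAB path; larger
// requests are allocated outside the LAB.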
AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
                                                  AllocationAlignment alignment,
                                                  AllocationOrigin origin) {
  // TODO(dinfuehr): Add support for allocation observers
  CHECK(FLAG_concurrent_allocation);

  // Ensure that we are on the right thread
  DCHECK_EQ(LocalHeap::Current(), local_heap_);

  if (object_size > kMaxLabObjectSize) {
    return AllocateOutsideLab(object_size, alignment, origin);
  }

  return AllocateInLab(object_size, alignment, origin);
}

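// Fast path: bump-pointer allocation in the current LAB. If the LAB cannot
// satisfy the request (IsRetry), fall back to the slow path, which is expected
// to set up a fresh LAB before retrying.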
AllocationResult ConcurrentAllocator::AllocateInLab(
    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
  AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
  if (allocation.IsRetry()) {
    return AllocateInLabSlow(object_size, alignment, origin);
  } else {
    return allocation;
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_