/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_

#include "bump_pointer_space.h"

#include "base/bit_utils.h"

namespace art {
namespace gc {
namespace space {

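// Thread-safe allocation: rounds the request up to kAlignment, delegates to AllocNonvirtual,
// and on success reports the rounded size back through the out-parameters.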
inline mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                               size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  mirror::Object* ret = AllocNonvirtual(num_bytes);
  if (LIKELY(ret != nullptr)) {
    *bytes_allocated = num_bytes;
    if (usable_size != nullptr) {
      *usable_size = num_bytes;
    }
    *bytes_tl_bulk_allocated = num_bytes;
  }
  return ret;
}

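// Allocation path that requires the mutator lock to be held exclusively, so no other thread
// can race on end_; the pointer bump and the accounting counters use plain relaxed stores
// instead of atomic read-modify-write operations.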
inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                           size_t* bytes_allocated,
                                                           size_t* usable_size,
                                                           size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  num_bytes = RoundUp(num_bytes, kAlignment);
  uint8_t* end = end_.LoadRelaxed();
  if (end + num_bytes > growth_end_) {
    return nullptr;
  }
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(end);
  end_.StoreRelaxed(end + num_bytes);
  *bytes_allocated = num_bytes;
  // Use the CAS-free versions as an optimization.
  objects_allocated_.StoreRelaxed(objects_allocated_.LoadRelaxed() + 1);
  bytes_allocated_.StoreRelaxed(bytes_allocated_.LoadRelaxed() + num_bytes);
  if (UNLIKELY(usable_size != nullptr)) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return obj;
}

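// Lock-free bump-pointer allocation: advances end_ with a weak compare-and-swap loop and
// returns null if the request would move past growth_end_. Does not update the allocation
// counters.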
inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_end;
  uint8_t* new_end;
  do {
    old_end = end_.LoadRelaxed();
    new_end = old_end + num_bytes;
    // If there is no more room in the region, we are out of memory.
    if (UNLIKELY(new_end > growth_end_)) {
      return nullptr;
    }
  } while (!end_.CompareExchangeWeakSequentiallyConsistent(old_end, new_end));
  return reinterpret_cast<mirror::Object*>(old_end);
}

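// Same as AllocNonvirtualWithoutAccounting, but also atomically bumps the object and byte
// counters on success.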
inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
  mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
  if (ret != nullptr) {
    objects_allocated_.FetchAndAddSequentiallyConsistent(1);
    bytes_allocated_.FetchAndAddSequentiallyConsistent(num_bytes);
  }
  return ret;
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_