/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_H_
#define ART_RUNTIME_READ_BARRIER_H_

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "jni.h"
#include "mirror/object_reference.h"
#include "offsets.h"
#include "read_barrier_c.h"

// This is a C++ (not C) header file, separate from read_barrier_c.h
// which needs to be a C header file for asm_support.h.

namespace art {
namespace mirror {
  class Object;
  template<typename MirrorType> class HeapReference;
}  // namespace mirror
class ArtMethod;

class ReadBarrier {
 public:
  // Enable the to-space invariant checks. These checks are slow and run very often; do not
  // enable them in the fast-debug environment.
  DECLARE_RUNTIME_DEBUG_FLAG(kEnableToSpaceInvariantChecks);

  // Enable the read barrier checks. These checks are slow and run very often; do not enable
  // them in the fast-debug environment.
  DECLARE_RUNTIME_DEBUG_FLAG(kEnableReadBarrierInvariantChecks);

  // It's up to the implementation whether the given field gets updated, but the return value
  // must be an updated reference unless kAlwaysUpdateField is true.
  template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            bool kAlwaysUpdateField = false>
  ALWAYS_INLINE static MirrorType* Barrier(
      mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
      REQUIRES_SHARED(Locks::mutator_lock_);
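  //
  // Illustrative usage sketch only (not part of this header): a field getter might route its
  // reference load through the barrier roughly like this. The helper
  // GetFieldObjectReferenceAddr<T>() is assumed here for illustration and is not defined by
  // this file.
  //
  //   template <typename T>
  //   inline T* GetFieldObject(mirror::Object* obj, MemberOffset offset)
  //       REQUIRES_SHARED(Locks::mutator_lock_) {
  //     mirror::HeapReference<T>* addr = obj->GetFieldObjectReferenceAddr<T>(offset);
  //     return ReadBarrier::Barrier<T, kWithReadBarrier>(obj, offset, addr);
  //   }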

  // It's up to the implementation whether the given root gets updated,
  // but the return value must be an updated reference.
  template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root,
                                                  GcRootSource* gc_root_source = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // It's up to the implementation whether the given root gets updated,
  // but the return value must be an updated reference.
  template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                                  GcRootSource* gc_root_source = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);
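  //
  // Illustrative sketch only (simplified): a GC root is normally read through this barrier,
  // e.g. by GcRoot<MirrorType>::Read. A minimal caller might look like:
  //
  //   template <typename T>
  //   inline T* ReadRoot(mirror::CompressedReference<T>* root, GcRootSource* source)
  //       REQUIRES_SHARED(Locks::mutator_lock_) {
  //     return ReadBarrier::BarrierForRoot<T, kWithReadBarrier>(root, source);
  //   }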

  // Return the mirror Object if it is marked, or null if not.
  template <typename MirrorType>
  ALWAYS_INLINE static MirrorType* IsMarked(MirrorType* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsDuringStartup();

  // Without the holder object.
  static void AssertToSpaceInvariant(mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }
  // With the holder object.
  static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                     mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // With GcRootSource.
  static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // ALWAYS_INLINE on this caused a performance regression b/26744236.
  static mirror::Object* Mark(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr uint32_t WhiteState() {
    return white_state_;
  }
  static constexpr uint32_t GrayState() {
    return gray_state_;
  }

  // *fake_address_dependency will be set to zero; it should be bitwise-or'ed with the address
  // of the subsequent load to prevent the reordering of the read barrier bit load and the
  // subsequent object reference load (from one of `obj`'s fields).
  ALWAYS_INLINE static bool IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency)
      REQUIRES_SHARED(Locks::mutator_lock_);
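  //
  // Illustrative sketch only of how a caller might consume the fake address dependency;
  // `ref_addr` and MirrorType are placeholders, not definitions from this header:
  //
  //   uintptr_t fad;
  //   bool is_gray = ReadBarrier::IsGray(obj, &fad);
  //   // fad is zero, but OR-ing it in makes the reference load data-dependent on the
  //   // read barrier bit load, so the CPU cannot reorder the two loads.
  //   ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
  //       reinterpret_cast<uintptr_t>(ref_addr) | fad);
  //   MirrorType* ref = ref_addr->AsMirrorPtr();
  //   if (is_gray) {
  //     // Slow path: mark/update the reference.
  //   }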

  // This uses a load-acquire to load the read barrier bit internally to prevent the reordering of
  // the read barrier bit load and the subsequent load.
  ALWAYS_INLINE static bool IsGray(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsValidReadBarrierState(uint32_t rb_state) {
    return rb_state == white_state_ || rb_state == gray_state_;
  }

  static constexpr uint32_t white_state_ = 0x0;    // Not marked.
  static constexpr uint32_t gray_state_ = 0x1;     // Marked, but not marked through. On mark stack.
  static constexpr uint32_t rb_state_mask_ = 0x1;  // The low bits for white|gray.
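  //
  // For illustration only, assuming a lock-word accessor such as LockWord::ReadBarrierState()
  // (not defined in this header), a state check might look roughly like:
  //
  //   uint32_t rb_state = obj->GetLockWord(false).ReadBarrierState();
  //   DCHECK(IsValidReadBarrierState(rb_state));
  //   bool is_gray = (rb_state == gray_state_);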
};

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_H_