/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "profiling_info.h"

#include "art_method-inl.h"
#include "dex/dex_instruction.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

namespace art {

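// `cache_` is a variable-length array of `InlineCache` laid out at the end of this object
// (the code cache allocates room for `entries.size()` caches); zero it and record the dex
// pc of each invoke instruction that will be profiled.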
ProfilingInfo::ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
    : baseline_hotness_count_(GetOptimizeThreshold()),
      method_(method),
      number_of_inline_caches_(entries.size()),
      current_inline_uses_(0) {
  memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
  for (size_t i = 0; i < number_of_inline_caches_; ++i) {
    cache_[i].dex_pc_ = entries[i];
  }
}

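// The threshold used to seed `baseline_hotness_count_` above; it is read from the
// runtime's JIT options.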
uint16_t ProfilingInfo::GetOptimizeThreshold() {
  return Runtime::Current()->GetJITOptions()->GetOptimizeThreshold();
}

ProfilingInfo* ProfilingInfo::Create(Thread* self, ArtMethod* method) {
  // Walk over the dex instructions of the method and keep track of
  // instructions we are interested in profiling.
  DCHECK(!method->IsNative());

  std::vector<uint32_t> entries;
  for (const DexInstructionPcPair& inst : method->DexInstructions()) {
    switch (inst->Opcode()) {
      case Instruction::INVOKE_VIRTUAL:
      case Instruction::INVOKE_VIRTUAL_RANGE:
      case Instruction::INVOKE_INTERFACE:
      case Instruction::INVOKE_INTERFACE_RANGE:
        entries.push_back(inst.DexPc());
        break;

      default:
        break;
    }
  }

  // We always create a `ProfilingInfo` object, even if there are no instructions we are
  // interested in profiling. The JIT code cache uses it internally.

  // Allocate the `ProfilingInfo` object in the JIT's data space.
  jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
  return code_cache->AddProfilingInfo(self, method, entries);
}

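// Illustrative sketch only, not a call site from this file: a caller that wants profiling
// data for a warm method would create the info lazily. The `method` variable and the
// assumption that a failed data-space allocation surfaces as a null result are hypothetical.
//
//   Thread* self = Thread::Current();
//   ProfilingInfo* info = ProfilingInfo::Create(self, method);
//   if (info == nullptr) {
//     // No space left in the JIT data region; profiling is skipped for now.
//   }
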
InlineCache* ProfilingInfo::GetInlineCache(uint32_t dex_pc) {
  // TODO: binary search if array is too long.
  for (size_t i = 0; i < number_of_inline_caches_; ++i) {
    if (cache_[i].dex_pc_ == dex_pc) {
      return &cache_[i];
    }
  }
  ScopedObjectAccess soa(Thread::Current());
  LOG(FATAL) << "No inline cache found for " << ArtMethod::PrettyMethod(method_) << "@" << dex_pc;
  UNREACHABLE();
}

void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
  InlineCache* cache = GetInlineCache(dex_pc);
  for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
    mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
    mirror::Class* marked = ReadBarrier::IsMarked(existing);
    if (marked == cls) {
      // Receiver type is already in the cache, nothing else to do.
      return;
    } else if (marked == nullptr) {
      // Cache entry is empty, try to put `cls` in it.
      // Note: it's ok to spin on 'existing' here: if 'existing' is not null, that means
      // it is a stale heap address, which will only be cleared during SweepSystemWeaks,
      // *after* this thread hits a suspend point.
      GcRoot<mirror::Class> expected_root(existing);
      GcRoot<mirror::Class> desired_root(cls);
      auto atomic_root = reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i]);
      if (!atomic_root->CompareAndSetStrongSequentiallyConsistent(expected_root, desired_root)) {
        // Some other thread put a class in the cache, continue iteration starting at this
        // entry in case the entry contains `cls`.
        --i;
      } else {
        // We successfully set `cls`, just return.
        return;
      }
    }
  }
  // Unsuccessful: the cache is full, making the call site megamorphic. We do not DCHECK
  // this though, as the garbage collector might clear the entries concurrently.
}
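
// Illustrative sketch only, an assumption about how a consumer (e.g. the JIT compiler)
// could interpret the cache filled above; this is not code from this file. Since
// `AddInvokeInfo` fills entries front to back, a reader can count the leading non-null
// classes to classify the call site.
//
//   size_t count = 0;
//   while (count < InlineCache::kIndividualCacheSize && !cache->classes_[count].IsNull()) {
//     ++count;
//   }
//   // count == 0: no type seen yet, count == 1: monomorphic,
//   // count == kIndividualCacheSize: treated as megamorphic (see the comment above).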

ScopedProfilingInfoUse::ScopedProfilingInfoUse(jit::Jit* jit, ArtMethod* method, Thread* self)
    : jit_(jit),
      method_(method),
      self_(self),
      // Fetch the profiling info ahead of using it. If it's null when fetching,
      // we should not call JitCodeCache::DoneCompilerUse.
      profiling_info_(jit == nullptr
                          ? nullptr
                          : jit->GetCodeCache()->NotifyCompilerUse(method, self))
    {}

ScopedProfilingInfoUse::~ScopedProfilingInfoUse() {
  if (profiling_info_ != nullptr) {
    jit_->GetCodeCache()->DoneCompilerUse(method_, self_);
  }
}
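
// Illustrative sketch only; the `GetProfilingInfo()` accessor (declared in
// profiling_info.h) and the surrounding variables are assumptions. A compiler thread
// wraps its use of a method's profiling info in this scope so that
// NotifyCompilerUse/DoneCompilerUse keep the info alive while it is inspected.
//
//   ScopedProfilingInfoUse spiu(Runtime::Current()->GetJit(), method, Thread::Current());
//   if (spiu.GetProfilingInfo() != nullptr) {
//     InlineCache* ic = spiu.GetProfilingInfo()->GetInlineCache(dex_pc);
//     // ... read `ic` while the profiling info is guaranteed not to be freed ...
//   }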

}  // namespace art