// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "base/trace_event/cpufreq_monitor_android.h"

#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <string>

#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/functional/bind.h"
#include "base/memory/scoped_refptr.h"
#include "base/no_destructor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "base/trace_event/trace_event.h"
#include "base/types/fixed_array.h"

namespace base {

namespace trace_event {

31 namespace {
32 
33 const size_t kNumBytesToReadForSampling = 32;
34 constexpr const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("power");
35 const char kEventTitle[] = "CPU Frequency";
36 
37 }  // namespace
38 
// Out-of-line default constructor; the production delegate holds no state.
CPUFreqMonitorDelegate::CPUFreqMonitorDelegate() = default;

GetScalingCurFreqPathString(unsigned int cpu_id) const41 std::string CPUFreqMonitorDelegate::GetScalingCurFreqPathString(
42     unsigned int cpu_id) const {
43   return base::StringPrintf(
44       "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq", cpu_id);
45 }
46 
IsTraceCategoryEnabled() const47 bool CPUFreqMonitorDelegate::IsTraceCategoryEnabled() const {
48   bool enabled;
49   TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
50   return enabled;
51 }
52 
GetKernelMaxCPUs() const53 unsigned int CPUFreqMonitorDelegate::GetKernelMaxCPUs() const {
54   std::string str;
55   if (!base::ReadFileToString(
56           base::FilePath("/sys/devices/system/cpu/kernel_max"), &str)) {
57     // If we fail to read the kernel_max file, we just assume that CPU0 exists.
58     return 0;
59   }
60 
61   unsigned int kernel_max_cpu = 0;
62   base::StringToUint(str, &kernel_max_cpu);
63   return kernel_max_cpu;
64 }
65 
GetRelatedCPUsPathString(unsigned int cpu_id) const66 std::string CPUFreqMonitorDelegate::GetRelatedCPUsPathString(
67     unsigned int cpu_id) const {
68   return base::StringPrintf(
69       "/sys/devices/system/cpu/cpu%d/cpufreq/related_cpus", cpu_id);
70 }
71 
GetCPUIds(std::vector<unsigned int> * ids) const72 void CPUFreqMonitorDelegate::GetCPUIds(std::vector<unsigned int>* ids) const {
73   ids->clear();
74   unsigned int kernel_max_cpu = GetKernelMaxCPUs();
75   // CPUs related to one that's already marked for monitoring get set to "false"
76   // so we don't needlessly monitor CPUs with redundant frequency information.
77   base::FixedArray<bool> cpus_to_monitor(kernel_max_cpu + 1, true);
78 
79   // Rule out the related CPUs for each one so we only end up with the CPUs
80   // that are representative of the cluster.
81   for (unsigned int i = 0; i <= kernel_max_cpu; i++) {
82     if (!cpus_to_monitor[i])
83       continue;
84 
85     std::string filename = GetRelatedCPUsPathString(i);
86     std::string line;
87     if (!base::ReadFileToString(base::FilePath(filename), &line))
88       continue;
89     // When reading the related_cpus file, we expected the format to be
90     // something like "0 1 2 3" for CPU0-3 if they're all in one cluster.
91     for (auto& str_piece :
92          base::SplitString(line, " ", base::WhitespaceHandling::TRIM_WHITESPACE,
93                            base::SplitResult::SPLIT_WANT_NONEMPTY)) {
94       unsigned int cpu_id;
95       if (base::StringToUint(str_piece, &cpu_id)) {
96         if (cpu_id != i && cpu_id <= kernel_max_cpu)
97           cpus_to_monitor[cpu_id] = false;
98       }
99     }
100     ids->push_back(i);
101   }
102 
103   // If none of the files were readable, we assume CPU0 exists and fall back to
104   // using that.
105   if (ids->size() == 0)
106     ids->push_back(0);
107 }
108 
// Emits |freq| as a trace counter event titled "CPU Frequency", keyed by
// |cpu_id| so each CPU gets its own counter track.
void CPUFreqMonitorDelegate::RecordFrequency(unsigned int cpu_id,
                                             unsigned int freq) {
  TRACE_COUNTER_ID1(kTraceCategory, kEventTitle, cpu_id, freq);
}

// Creates the task runner all sampling work is posted to: a shared
// single-thread runner that may block (it does file I/O), runs at
// best-effort priority, and whose pending tasks may be skipped at shutdown.
scoped_refptr<SingleThreadTaskRunner>
CPUFreqMonitorDelegate::CreateTaskRunner() {
  return base::ThreadPool::CreateSingleThreadTaskRunner(
      {base::MayBlock(), base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
       base::TaskPriority::BEST_EFFORT},
      base::SingleThreadTaskRunnerThreadMode::SHARED);
}

// Default constructor: delegates to the injectable-delegate constructor with
// the production CPUFreqMonitorDelegate.
CPUFreqMonitor::CPUFreqMonitor()
    : CPUFreqMonitor(std::make_unique<CPUFreqMonitorDelegate>()) {}

// Constructor taking an explicit delegate, allowing an alternate
// implementation to be injected (e.g. for testing).
CPUFreqMonitor::CPUFreqMonitor(std::unique_ptr<CPUFreqMonitorDelegate> delegate)
    : delegate_(std::move(delegate)) {}

// Stops sampling on destruction so a queued Sample task will not re-post.
CPUFreqMonitor::~CPUFreqMonitor() {
  Stop();
}

// static
// Returns the process-wide singleton. NoDestructor means the instance is
// never destroyed at process exit (and ~CPUFreqMonitor never runs for it).
CPUFreqMonitor* CPUFreqMonitor::GetInstance() {
  static base::NoDestructor<CPUFreqMonitor> instance;
  return instance.get();
}

// Trace-log observer hook for tracing being enabled: posts Start() to the
// monitor's task runner so the blocking file opens happen off this thread.
// The weak pointer keeps the task from running after |this| is destroyed.
void CPUFreqMonitor::OnTraceLogEnabled() {
  GetOrCreateTaskRunner()->PostTask(
      FROM_HERE,
      base::BindOnce(&CPUFreqMonitor::Start, weak_ptr_factory_.GetWeakPtr()));
}

// Trace-log observer hook for tracing being disabled: synchronously clears
// the enabled flag so the in-flight Sample task stops re-posting itself.
void CPUFreqMonitor::OnTraceLogDisabled() {
  Stop();
}

Start()148 void CPUFreqMonitor::Start() {
149   // It's the responsibility of the caller to ensure that Start/Stop are
150   // synchronized. If Start/Stop are called asynchronously where this value
151   // may be incorrect, we have bigger problems.
152   if (is_enabled_.load(std::memory_order_relaxed) ||
153       !delegate_->IsTraceCategoryEnabled()) {
154     return;
155   }
156 
157   std::vector<unsigned int> cpu_ids;
158   delegate_->GetCPUIds(&cpu_ids);
159 
160   std::vector<std::pair<unsigned int, base::ScopedFD>> fds;
161   for (unsigned int id : cpu_ids) {
162     std::string fstr = delegate_->GetScalingCurFreqPathString(id);
163     int fd = open(fstr.c_str(), O_RDONLY);
164     if (fd == -1)
165       continue;
166 
167     fds.emplace_back(std::make_pair(id, base::ScopedFD(fd)));
168   }
169   // We failed to read any scaling_cur_freq files, no point sampling nothing.
170   if (fds.size() == 0)
171     return;
172 
173   is_enabled_.store(true, std::memory_order_release);
174 
175   GetOrCreateTaskRunner()->PostTask(
176       FROM_HERE,
177       base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
178                      std::move(fds)));
179 }
180 
// Clears the enabled flag. The currently queued Sample task observes the
// flag at the start of its next run and stops re-posting itself.
void CPUFreqMonitor::Stop() {
  is_enabled_.store(false, std::memory_order_release);
}

Sample(std::vector<std::pair<unsigned int,base::ScopedFD>> fds)185 void CPUFreqMonitor::Sample(
186     std::vector<std::pair<unsigned int, base::ScopedFD>> fds) {
187   // For the same reason as above we use relaxed ordering, because if this value
188   // is in transition and we use acquire ordering then we'll never shut down our
189   // original Sample tasks until the next Stop, so it's still the responsibility
190   // of callers to sync Start/Stop.
191   if (!is_enabled_.load(std::memory_order_relaxed))
192     return;
193 
194   for (auto& id_fd : fds) {
195     int fd = id_fd.second.get();
196     unsigned int freq = 0;
197     // If we have trouble reading data from the file for any reason we'll end up
198     // reporting the frequency as nothing.
199     lseek(fd, 0L, SEEK_SET);
200     char data[kNumBytesToReadForSampling];
201 
202     ssize_t bytes_read = read(fd, data, kNumBytesToReadForSampling);
203     if (bytes_read > 0) {
204       if (static_cast<size_t>(bytes_read) < kNumBytesToReadForSampling)
205         data[static_cast<size_t>(bytes_read)] = '\0';
206       int ret = sscanf(data, "%d", &freq);
207       if (ret == 0 || ret == std::char_traits<char>::eof())
208         freq = 0;
209     }
210 
211     delegate_->RecordFrequency(id_fd.first, freq);
212   }
213 
214   GetOrCreateTaskRunner()->PostDelayedTask(
215       FROM_HERE,
216       base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
217                      std::move(fds)),
218       base::Milliseconds(kDefaultCPUFreqSampleIntervalMs));
219 }
220 
// Test-only accessor for the sampling-enabled flag. Acquire ordering pairs
// with the release stores performed in Start() and Stop().
bool CPUFreqMonitor::IsEnabledForTesting() {
  return is_enabled_.load(std::memory_order_acquire);
}

// Lazily creates the task runner via the delegate on first use and caches it
// in |task_runner_| for every later call.
const scoped_refptr<SingleThreadTaskRunner>&
CPUFreqMonitor::GetOrCreateTaskRunner() {
  if (!task_runner_)
    task_runner_ = delegate_->CreateTaskRunner();
  return task_runner_;
}

}  // namespace trace_event
}  // namespace base