// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/base/internal/sysinfo.h"

#include "absl/base/attributes.h"

#ifdef _WIN32
#include <windows.h>
#else
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif

#ifdef __linux__
#include <sys/syscall.h>
#endif

#if defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif

#if defined(__myriad2__)
#include <rtems.h>
#endif

#include <string.h>

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <limits>
#include <thread>  // NOLINT(build/c++11)
#include <utility>
#include <vector>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/thread_annotations.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

static int GetNumCPUs() {
#if defined(__myriad2__)
  return 1;
#else
  // Other possibilities:
  //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
  //  - sysconf(_SC_NPROCESSORS_ONLN)
  return std::thread::hardware_concurrency();
#endif
}
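
// A minimal sketch of the sysconf() alternative listed above (illustrative
// only, assuming a POSIX <unistd.h>; this file uses
// std::thread::hardware_concurrency() instead):
//
//   long n = sysconf(_SC_NPROCESSORS_ONLN);
//   return n > 0 ? static_cast<int>(n) : 1;  // Treat failure (-1) as 1 CPU.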

#if defined(_WIN32)

static double GetNominalCPUFrequency() {
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
    !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
  // UWP apps don't have access to the registry and currently don't provide
  // an API that reports the CPU's nominal frequency.
  return 1.0;
#else
#pragma comment(lib, "advapi32.lib")  // For Reg* functions.
  HKEY key;
  // Use the Reg* functions rather than the SH functions because shlwapi.dll
  // pulls in gdi32.dll which makes process destruction much more costly.
  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
                    "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
                    KEY_READ, &key) == ERROR_SUCCESS) {
    DWORD type = 0;
    DWORD data = 0;
    DWORD data_size = sizeof(data);
    auto result = RegQueryValueExA(key, "~MHz", 0, &type,
                                   reinterpret_cast<LPBYTE>(&data), &data_size);
    RegCloseKey(key);
    if (result == ERROR_SUCCESS && type == REG_DWORD &&
        data_size == sizeof(data)) {
      return data * 1e6;  // Value is MHz.
    }
  }
  return 1.0;
#endif  // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
}

#elif defined(CTL_HW) && defined(HW_CPU_FREQ)

static double GetNominalCPUFrequency() {
  unsigned freq;
  size_t size = sizeof(freq);
  int mib[2] = {CTL_HW, HW_CPU_FREQ};
  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
    return static_cast<double>(freq);
  }
  return 1.0;
}

#else

// Helper function for reading a long from a file. Returns true on success,
// in which case the memory location pointed to by `value` is set to the
// value read.
static bool ReadLongFromFile(const char *file, long *value) {
  bool ret = false;
  int fd = open(file, O_RDONLY);
  if (fd != -1) {
    char line[1024];
    char *err;
    memset(line, '\0', sizeof(line));
    int len = read(fd, line, sizeof(line) - 1);
    if (len <= 0) {
      ret = false;
    } else {
      const long temp_value = strtol(line, &err, 10);
      if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
        *value = temp_value;
        ret = true;
      }
    }
    close(fd);
  }
  return ret;
}
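
// For example, a file whose contents are "2200000\n" (as
// /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq might report for a
// 2.2 GHz part, since the value is in kHz) sets *value to 2200000 and
// returns true, while non-numeric contents leave `err` pointing at a
// character that is neither '\n' nor '\0' and return false.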

#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)

// Reads a monotonic time source and returns a value in
// nanoseconds. The returned value uses an arbitrary epoch, not the
// Unix epoch.
static int64_t ReadMonotonicClockNanos() {
  struct timespec t;
#ifdef CLOCK_MONOTONIC_RAW
  int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
#else
  int rc = clock_gettime(CLOCK_MONOTONIC, &t);
#endif
  if (rc != 0) {
    perror("clock_gettime() failed");
    abort();
  }
  return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
}

class UnscaledCycleClockWrapperForInitializeFrequency {
 public:
  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
};

struct TimeTscPair {
  int64_t time;  // From ReadMonotonicClockNanos().
  int64_t tsc;   // From UnscaledCycleClock::Now().
};

// Returns a pair of values (monotonic kernel time, TSC ticks) that
// approximately correspond to each other.  This is accomplished by
// doing several reads and picking the reading with the lowest
// latency.  This approach is used to minimize the probability that
// our thread was preempted between clock reads.
static TimeTscPair GetTimeTscPair() {
  int64_t best_latency = std::numeric_limits<int64_t>::max();
  TimeTscPair best;
  for (int i = 0; i < 10; ++i) {
    int64_t t0 = ReadMonotonicClockNanos();
    int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
    int64_t t1 = ReadMonotonicClockNanos();
    int64_t latency = t1 - t0;
    if (latency < best_latency) {
      best_latency = latency;
      best.time = t0;
      best.tsc = tsc;
    }
  }
  return best;
}

// Measures and returns the TSC frequency by taking a pair of
// measurements approximately `sleep_nanoseconds` apart.
static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
  auto t0 = GetTimeTscPair();
  struct timespec ts;
  ts.tv_sec = 0;
  ts.tv_nsec = sleep_nanoseconds;
  while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
  auto t1 = GetTimeTscPair();
  double elapsed_ticks = t1.tsc - t0.tsc;
  double elapsed_time = (t1.time - t0.time) * 1e-9;
  return elapsed_ticks / elapsed_time;
}
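
// As a worked example: a 3 GHz TSC sampled across a 1 ms sleep advances by
// roughly 3,000,000 ticks while the monotonic clock advances by roughly
// 1e-3 seconds, so the quotient above is about 3e6 / 1e-3 = 3e9 Hz.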

// Measures and returns the TSC frequency by calling
// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
// frequency measurement stabilizes.
static double MeasureTscFrequency() {
  double last_measurement = -1.0;
  int sleep_nanoseconds = 1000000;  // 1 millisecond.
  for (int i = 0; i < 8; ++i) {
    double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
    if (measurement * 0.99 < last_measurement &&
        last_measurement < measurement * 1.01) {
      // Use the current measurement if it is within 1% of the
      // previous measurement.
      return measurement;
    }
    last_measurement = measurement;
    sleep_nanoseconds *= 2;
  }
  return last_measurement;
}
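
// With 8 attempts starting at 1 ms and doubling each time, the sleeps are
// 1, 2, 4, ..., 128 ms, so the worst case spends roughly 255 ms sleeping
// before returning the last (unconfirmed) measurement.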

#endif  // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY

static double GetNominalCPUFrequency() {
  long freq = 0;

  // Google's production kernel has a patch to export the TSC
  // frequency through sysfs. If the kernel is exporting the TSC
  // frequency, use that. cpuinfo_max_freq cannot always be relied on,
  // because the BIOS may be exporting an invalid p-state (on x86) or
  // p-states may be used to put the processor in a new mode (turbo
  // mode). The same caveats apply to /proc/cpuinfo.
  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
    return freq * 1e3;  // Value is kHz.
  }

#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
  // On these platforms, the TSC frequency is the nominal CPU
  // frequency.  But without having the kernel export it directly
  // through /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
  // other way to reliably get the TSC frequency, so we have to
  // measure it ourselves.  Some CPUs abuse cpuinfo_max_freq by
  // exporting "fake" frequencies for implementing new features. For
  // example, Intel's turbo mode is enabled by exposing a p-state
  // value with a higher frequency than that of the real TSC
  // rate. Because of this, we prefer to measure the TSC rate
  // ourselves on i386 and x86-64.
  return MeasureTscFrequency();
#else

  // If CPU scaling is in effect, we want to use the *maximum*
  // frequency, not whatever CPU speed some random processor happens
  // to be using now.
  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
                       &freq)) {
    return freq * 1e3;  // Value is kHz.
  }

  return 1.0;
#endif  // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
}

#endif

ABSL_CONST_INIT static once_flag init_num_cpus_once;
ABSL_CONST_INIT static int num_cpus = 0;

// NumCPUs() may be called before main() and before malloc is properly
// initialized; therefore, this must not allocate memory.
int NumCPUs() {
  base_internal::LowLevelCallOnce(
      &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
  return num_cpus;
}

// A default frequency of 0.0 might be dangerous if it is used in division.
ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;

// NominalCPUFrequency() may be called before main() and before malloc is
// properly initialized; therefore, this must not allocate memory.
double NominalCPUFrequency() {
  base_internal::LowLevelCallOnce(
      &init_nominal_cpu_frequency_once,
      []() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
  return nominal_cpu_frequency;
}

#if defined(_WIN32)

pid_t GetTID() {
  return pid_t{GetCurrentThreadId()};
}

#elif defined(__linux__)

#ifndef SYS_gettid
#define SYS_gettid __NR_gettid
#endif

pid_t GetTID() {
  return syscall(SYS_gettid);
}

#elif defined(__akaros__)

pid_t GetTID() {
  // Akaros has a concept of "vcore context", which is the state the program
  // is forced into when we need to make a user-level scheduling decision, or
  // run a signal handler.  This is analogous to the interrupt context that a
  // CPU might enter if it encounters some kind of exception.
  //
  // There is no current thread context in vcore context, but we need to give
  // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
  // Thread 0 always exists, so if we are in vcore context, we return that.
  //
  // Otherwise, we know (since we are using pthreads) that the uthread struct
  // current_uthread is pointing to is the first element of a
  // struct pthread_tcb, so we extract and return the thread ID from that.
  //
  // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
  // structure at some point. We should modify this code to remove the cast
  // when that happens.
  if (in_vcore_context())
    return 0;
  return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
}

#elif defined(__myriad2__)

pid_t GetTID() {
  uint32_t tid;
  rtems_task_ident(RTEMS_SELF, 0, &tid);
  return tid;
}

#else

// Fallback implementation of GetTID using pthread_getspecific.
ABSL_CONST_INIT static once_flag tid_once;
ABSL_CONST_INIT static pthread_key_t tid_key;
ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

// We set a bit per thread in this array to indicate that an ID is in
// use. ID 0 is unused because it is the default value returned by
// pthread_getspecific().
ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
    ABSL_GUARDED_BY(tid_lock) = nullptr;
static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.

// Returns the TID to tid_array.
static void FreeTID(void *v) {
  intptr_t tid = reinterpret_cast<intptr_t>(v);
  int word = tid / kBitsPerWord;
  uint32_t mask = ~(1u << (tid % kBitsPerWord));
  absl::base_internal::SpinLockHolder lock(&tid_lock);
  assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
  (*tid_array)[word] &= mask;
}

static void InitGetTID() {
  if (pthread_key_create(&tid_key, FreeTID) != 0) {
    // The logging system calls GetTID() so it can't be used here.
    perror("pthread_key_create failed");
    abort();
  }

  // Initialize tid_array.
  absl::base_internal::SpinLockHolder lock(&tid_lock);
  tid_array = new std::vector<uint32_t>(1);
  (*tid_array)[0] = 1;  // ID 0 is never-allocated.
}

// Return a per-thread small integer ID from pthread's thread-specific data.
pid_t GetTID() {
  absl::call_once(tid_once, InitGetTID);

  intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
  if (tid != 0) {
    return tid;
  }

  int bit;  // tid_array[word] = 1u << bit;
  size_t word;
  {
    // Search for the first unused ID.
    absl::base_internal::SpinLockHolder lock(&tid_lock);
    // First search for a word in the array that is not all ones.
    word = 0;
    while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
      ++word;
    }
    if (word == tid_array->size()) {
      tid_array->push_back(0);  // No space left, add kBitsPerWord more IDs.
    }
    // Search for a zero bit in the word.
    bit = 0;
    while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
      ++bit;
    }
    tid = (word * kBitsPerWord) + bit;
    (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
  }

  if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
    perror("pthread_setspecific failed");
    abort();
  }

  return static_cast<pid_t>(tid);
}
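
// For example, with kBitsPerWord == 32, TID 37 lives in word 37 / 32 == 1
// at bit 37 % 32 == 5: GetTID() sets (*tid_array)[1] |= 1u << 5 when the
// ID is handed out, and FreeTID() clears the same bit at thread exit.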

#endif

// GetCachedTID() caches the thread ID in thread-local storage (which is a
// userspace construct) to avoid unnecessary system calls. Without the
// cache, a call takes roughly 98ns; with it, roughly 1ns.
pid_t GetCachedTID() {
#if ABSL_HAVE_THREAD_LOCAL
  static thread_local pid_t thread_id = GetTID();
  return thread_id;
#else
  return GetTID();
#endif  // ABSL_HAVE_THREAD_LOCAL
}
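
// Note: fork() does not rerun thread_local initializers, so a child process
// inherits the parent's cached value; code that forks and needs a fresh ID
// in the child should call GetTID() directly.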

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl