// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/time.h"

#if V8_OS_POSIX
#include <fcntl.h>  // for O_RDONLY
#include <sys/time.h>
#include <unistd.h>
#endif
#if V8_OS_DARWIN
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <pthread.h>
#endif

#include <cstring>
#include <limits>
#include <ostream>

#if V8_OS_WIN
#include <windows.h>

// This has to come after windows.h.
#include <mmsystem.h>  // For timeGetTime().

#include <atomic>

#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"

#if V8_OS_STARBOARD
#include "starboard/time.h"
#endif

namespace {

#if V8_OS_DARWIN
int64_t ComputeThreadTicks() {
  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t thread_info_data;
  kern_return_t kr = thread_info(
      pthread_mach_thread_np(pthread_self()),
      THREAD_BASIC_INFO,
      reinterpret_cast<thread_info_t>(&thread_info_data),
      &thread_info_count);
  CHECK_EQ(kr, KERN_SUCCESS);

  // We can add the seconds into a {int64_t} without overflow.
  CHECK_LE(thread_info_data.user_time.seconds,
           std::numeric_limits<int64_t>::max() -
               thread_info_data.system_time.seconds);
  int64_t seconds =
      thread_info_data.user_time.seconds + thread_info_data.system_time.seconds;
  // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
  // in [0, 2 * kMicrosecondsPerSecond) must result in a valid {int64_t}.
  static constexpr int64_t kSecondsLimit =
      (std::numeric_limits<int64_t>::max() /
       v8::base::Time::kMicrosecondsPerSecond) -
      2;
  CHECK_GT(kSecondsLimit, seconds);
  int64_t micros = seconds * v8::base::Time::kMicrosecondsPerSecond;
  micros += (thread_info_data.user_time.microseconds +
             thread_info_data.system_time.microseconds);
  return micros;
}
#elif V8_OS_POSIX
// Helper function to get results from clock_gettime() and convert to a
// microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
// on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
// _POSIX_MONOTONIC_CLOCK to -1.
V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
#if defined(V8_OS_AIX)
  // On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
  // resolution of 10ms. thread_cputime API provides the time in ns.
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
#if defined(__PASE__)  // CLOCK_THREAD_CPUTIME_ID clock not supported on IBMi
    return 0;
#else
    thread_cputime_t tc;
    if (thread_cputime(-1, &tc) != 0) {
      UNREACHABLE();
    }
    return (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond)
           + (tc.utime / v8::base::Time::kNanosecondsPerMicrosecond);
#endif  // defined(__PASE__)
  }
#endif  // defined(V8_OS_AIX)
  struct timespec ts;
  if (clock_gettime(clk_id, &ts) != 0) {
    UNREACHABLE();
  }
  // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
  // in [0, kMicrosecondsPerSecond) must result in a valid {int64_t}.
  static constexpr int64_t kSecondsLimit =
      (std::numeric_limits<int64_t>::max() /
       v8::base::Time::kMicrosecondsPerSecond) -
      1;
  CHECK_GT(kSecondsLimit, ts.tv_sec);
  int64_t result = int64_t{ts.tv_sec} * v8::base::Time::kMicrosecondsPerSecond;
  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
  return result;
#else  // Monotonic clock not supported.
  return 0;
#endif
}

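// Returns the current CLOCK_MONOTONIC reading in nanoseconds. Used by
// IsHighResolutionTimer() below to probe the clock's effective resolution.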
V8_INLINE int64_t NanosecondsNow() {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return int64_t{ts.tv_sec} * v8::base::Time::kNanosecondsPerSecond +
         ts.tv_nsec;
}

inline bool IsHighResolutionTimer(clockid_t clk_id) {
  // Currently this is only needed for CLOCK_MONOTONIC. If other clocks need
  // to be checked, care must be taken to support all platforms correctly;
  // see ClockNow() above for precedent.
  DCHECK_EQ(clk_id, CLOCK_MONOTONIC);
  int64_t previous = NanosecondsNow();
  // There should be enough attempts to make the loop run for more than one
  // microsecond if the early return is not taken -- the elapsed time can't
  // be measured in that situation, so we have to estimate it offline.
  constexpr int kAttempts = 100;
  for (int i = 0; i < kAttempts; i++) {
    int64_t next = NanosecondsNow();
    int64_t delta = next - previous;
    if (delta == 0) continue;
    // We expect most systems to take this branch on the first iteration.
    if (delta <= v8::base::Time::kNanosecondsPerMicrosecond) {
      return true;
    }
    previous = next;
  }
  // As of 2022, we expect that the loop above has taken at least 2 μs (on
  // a fast desktop). If we still haven't seen a non-zero clock increment
  // in sub-microsecond range, assume a low resolution timer.
  return false;
}

#elif V8_OS_WIN
// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
  DCHECK(result);
  USE(result);
  return perf_counter_now.QuadPart;
}
#endif  // V8_OS_DARWIN

}  // namespace

namespace v8 {
namespace base {

int TimeDelta::InDays() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}

int TimeDelta::InHours() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}

int TimeDelta::InMinutes() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}

double TimeDelta::InSecondsF() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<double>::infinity();
  }
  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}

int64_t TimeDelta::InSeconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ / Time::kMicrosecondsPerSecond;
}

double TimeDelta::InMillisecondsF() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<double>::infinity();
  }
  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMilliseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMillisecondsRoundedUp() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
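  // Adding (kMicrosecondsPerMillisecond - 1) before the division rounds any
  // partial millisecond up instead of truncating it.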
  return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
         Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMicroseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_;
}

int64_t TimeDelta::InNanoseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ * Time::kNanosecondsPerMicrosecond;
}

#if V8_OS_DARWIN

TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  DCHECK_GE(delta_, 0);
  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_DARWIN

#if V8_OS_POSIX

TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct timespec TimeDelta::ToTimespec() const {
  struct timespec ts;
  ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_POSIX


#if V8_OS_WIN

// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    MutexGuard lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  Time NowFromSystemTime() {
    MutexGuard lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};

namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock)
}  // namespace

Time Time::Now() { return GetClock()->Now(); }

Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }

// Time between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01),
// in microseconds.
static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};

Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
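  // FILETIME counts 100 ns intervals since the Windows epoch; dividing by 10
  // converts to microseconds, and subtracting the epoch offset rebases the
  // value onto the Unix epoch.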
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}


FILETIME Time::ToFiletime() const {
  DCHECK_GE(us_, 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
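  // Reverse of FromFiletime(): rebase onto the Windows epoch and multiply by
  // 10 to get 100 ns intervals.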
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}

#elif V8_OS_POSIX

Time Time::Now() {
  struct timeval tv;
  int result = gettimeofday(&tv, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  return FromTimeval(tv);
}


Time Time::NowFromSystemTime() {
  return Now();
}


Time Time::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}


struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}


Time Time::FromTimeval(struct timeval tv) {
  DCHECK_GE(tv.tv_usec, 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}


struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}

#elif V8_OS_STARBOARD

Time Time::Now() { return Time(SbTimeToPosix(SbTimeGetNow())); }

Time Time::NowFromSystemTime() { return Now(); }

#endif  // V8_OS_STARBOARD

Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}


double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}


std::ostream& operator<<(std::ostream& os, const Time& time) {
  return os << time.ToJsTime();
}


#if V8_OS_WIN

namespace {

// We define a wrapper to adapt between the __stdcall and __cdecl call of the
// mock function, and to avoid a static constructor.  Assigning an import to a
// function pointer directly would require setup code to fetch from the IAT.
DWORD timeGetTimeWrapper() { return timeGetTime(); }

DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;

// A structure holding the most significant bits of "last seen" and a
// "rollover" counter.
union LastTimeAndRolloversState {
  // The state as a single 32-bit opaque value.
  int32_t as_opaque_32;

  // The state as usable values.
  struct {
    // The top 8-bits of the "last" time. This is enough to check for rollovers
    // and the small bit-size means fewer CompareAndSwap operations to store
    // changes in state, which in turn makes for fewer retries.
    uint8_t last_8;
    // A count of the number of detected rollovers. Using this as bits 47-32
    // of the upper half of a 64-bit value results in a 48-bit tick counter.
    // This extends the total rollover period from about 49 days to about 8800
    // years while still allowing it to be stored with last_8 in a single
    // 32-bit value.
    uint16_t rollovers;
  } as_values;
};
std::atomic<int32_t> g_last_time_and_rollovers{0};
static_assert(sizeof(LastTimeAndRolloversState) <=
                  sizeof(g_last_time_and_rollovers),
              "LastTimeAndRolloversState does not fit in a single atomic word");

// We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
// because it returns the number of milliseconds since Windows has started,
// which will roll over the 32-bit value every ~49 days.  We try to track
// rollover ourselves, which works if TimeTicks::Now() is called at least every
// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
TimeTicks RolloverProtectedNow() {
  LastTimeAndRolloversState state;
  DWORD now;  // DWORD is always unsigned 32 bits.

  // Fetch the "now" and "last" tick values, updating "last" with "now" and
  // incrementing the "rollovers" counter if the tick-value has wrapped back
  // around. Atomic operations ensure that both "last" and "rollovers" are
  // always updated together.
  int32_t original = g_last_time_and_rollovers.load(std::memory_order_acquire);
  while (true) {
    state.as_opaque_32 = original;
    now = g_tick_function();
    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
    if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
    state.as_values.last_8 = now_8;

    // If the state hasn't changed, exit the loop.
    if (state.as_opaque_32 == original) break;

    // Save the changed state. If the existing value is unchanged from the
    // original, exit the loop.
    if (g_last_time_and_rollovers.compare_exchange_weak(
            original, state.as_opaque_32, std::memory_order_acq_rel)) {
      break;
    }

    // Another thread has done something in between so retry from the top.
    // {original} has been updated by the {compare_exchange_weak}.
  }

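  // Combine the 32-bit millisecond tick count with the rollover count in the
  // upper bits, yielding a 48-bit monotonic millisecond value.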
  return TimeTicks() +
         TimeDelta::FromMilliseconds(
             now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
}

// Discussion of tick counter options on Windows:
//
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
// expensive to retrieve. However, on older CPUs, two issues can affect its
// reliability: First it is maintained per processor and not synchronized
// between processors. Also, the counters will change frequency due to thermal
// and power changes, and stop in some states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
// resolution (<1 microsecond) time stamp. On most hardware running today, it
// auto-detects and uses the constant-rate RDTSC counter to provide extremely
// efficient and reliable time stamps.
//
// On older CPUs where RDTSC is unreliable, it falls back to using more
// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
// PM timer, and can involve system calls; and all this is up to the HAL (with
// some help from ACPI). According to
// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
// worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In best cases, the HAL may conclude that the
// RDTSC counter runs at a constant frequency, then it uses that instead. On
// multiprocessor machines, it will try to verify the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
// give consistent results on a multiprocessor computer, but for older CPUs it
// can be unreliable due to bugs in BIOS or HAL.
//
// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
// milliseconds) time stamp but is comparatively less expensive to retrieve and
// more reliable. Time::EnableHighResolutionTimer() and
// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
// this timer; and also other Windows applications can alter it, affecting this
// one.

TimeTicks InitialTimeTicksNowFunction();

// See "threading notes" in InitializeNowFunctionPointer() for details on how
// concurrent reads/writes to these globals have been made safe.
using TimeTicksNowFunction = decltype(&TimeTicks::Now);
TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
int64_t g_qpc_ticks_per_second = 0;

TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
  // InitializeNowFunctionPointer(), has happened by this point.
  std::atomic_thread_fence(std::memory_order_acquire);

  DCHECK_GT(g_qpc_ticks_per_second, 0);

  // If the QPC Value is below the overflow threshold, we proceed with
  // simple multiply and divide.
  if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
    return TimeDelta::FromMicroseconds(
        qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
  }
  // Otherwise, calculate microseconds in a roundabout manner to avoid
  // overflow and precision issues.
  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
  return TimeDelta::FromMicroseconds(
      (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
      ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
       g_qpc_ticks_per_second));
}

TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }

void InitializeTimeTicksNowFunctionPointer() {
  LARGE_INTEGER ticks_per_sec = {};
  if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;

  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
  // the low-resolution clock.
  //
  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
  // will still use the low-resolution clock. A CPU lacking a non-stop time
  // counter will cause Windows to provide an alternate QPC implementation that
  // works, but is expensive to use. Certain Athlon CPUs are known to make the
  // QPC implementation unreliable.
  //
  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
  // ~72% of users fall within this category.
  TimeTicksNowFunction now_function;
  CPU cpu;
  if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter()) {
    now_function = &RolloverProtectedNow;
  } else {
    now_function = &QPCNow;
  }

  // Threading note 1: In an unlikely race condition, it's possible for two or
  // more threads to enter InitializeNowFunctionPointer() in parallel. This is
  // not a problem since all threads should end up writing out the same values
  // to the global variables.
  //
  // Threading note 2: A release fence is placed here to ensure, from the
  // perspective of other threads using the function pointers, that the
  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
  // are changed.
  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
  std::atomic_thread_fence(std::memory_order_release);
  g_time_ticks_now_function = now_function;
}

TimeTicks InitialTimeTicksNowFunction() {
  InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function();
}

}  // namespace

// static
TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(g_time_ticks_now_function());
  DCHECK(!ticks.IsNull());
  return ticks;
}

// static
bool TimeTicks::IsHighResolution() {
  if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
    InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function == &QPCNow;
}

#else  // V8_OS_WIN

TimeTicks TimeTicks::Now() {
  int64_t ticks;
#if V8_OS_DARWIN
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
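  // info.numer / info.denom is the timebase ratio that converts mach ticks to
  // nanoseconds; combined with the division by kNanosecondsPerMicrosecond the
  // result is in microseconds. Dividing first keeps the intermediate product
  // smaller.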
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
  ticks = ClockNow(CLOCK_MONOTONIC);
#elif V8_OS_STARBOARD
  ticks = SbTimeGetMonotonicNow();
#else
#error platform does not implement TimeTicks::Now.
#endif  // V8_OS_DARWIN
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}

// static
bool TimeTicks::IsHighResolution() {
#if V8_OS_DARWIN
  return true;
#elif V8_OS_POSIX
  static const bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
  return is_high_resolution;
#else
  return true;
#endif
}

#endif  // V8_OS_WIN


bool ThreadTicks::IsSupported() {
#if V8_OS_STARBOARD
#if SB_API_VERSION >= 12
  return SbTimeIsTimeThreadNowSupported();
#elif SB_HAS(TIME_THREAD_NOW)
  return true;
#else
  return false;
#endif
#elif defined(__PASE__)
  // Thread CPU time accounting is unavailable in PASE
  return false;
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
    defined(V8_OS_DARWIN) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
  return true;
#elif defined(V8_OS_WIN)
  return IsSupportedWin();
#else
  return false;
#endif
}


ThreadTicks ThreadTicks::Now() {
#if V8_OS_STARBOARD
#if SB_API_VERSION >= 12
  if (SbTimeIsTimeThreadNowSupported())
    return ThreadTicks(SbTimeGetMonotonicThreadNow());
  UNREACHABLE();
#elif SB_HAS(TIME_THREAD_NOW)
  return ThreadTicks(SbTimeGetMonotonicThreadNow());
#else
  UNREACHABLE();
#endif
#elif V8_OS_DARWIN
  return ThreadTicks(ComputeThreadTicks());
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_ANDROID)
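  // CLOCK_THREAD_CPUTIME_ID measures the CPU time consumed by the calling
  // thread.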
  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
#elif V8_OS_SOLARIS
  return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_WIN
  return ThreadTicks::GetForThread(::GetCurrentThread());
#else
  UNREACHABLE();
#endif
}


#if V8_OS_WIN
ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
  DCHECK(IsSupported());

  // Get the number of TSC ticks used by the current thread.
  ULONG64 thread_cycle_time = 0;
  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);

  // Get the frequency of the TSC.
  double tsc_ticks_per_second = TSCTicksPerSecond();
  if (tsc_ticks_per_second == 0)
    return ThreadTicks();

  // Return the CPU time of the current thread.
  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
  return ThreadTicks(
      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
}

// static
bool ThreadTicks::IsSupportedWin() {
  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter();
  return is_supported;
}

// static
void ThreadTicks::WaitUntilInitializedWin() {
  while (TSCTicksPerSecond() == 0)
    ::Sleep(10);
}

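// ReadCycleCounter() reads the CPU cycle counter: PMCCNTR_EL0 on ARM64
// Windows, RDTSC on x86/x64.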
#ifdef V8_HOST_ARCH_ARM64
#define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
#else
#define ReadCycleCounter() __rdtsc()
#endif

double ThreadTicks::TSCTicksPerSecond() {
  DCHECK(IsSupported());

  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
  // frequency, because there is no guarantee that the TSC frequency is equal to
  // the performance counter frequency.

  // The TSC frequency is cached in a static variable because it takes some time
  // to compute it.
  static double tsc_ticks_per_second = 0;
  if (tsc_ticks_per_second != 0)
    return tsc_ticks_per_second;

  // Increase the thread priority to reduce the chances of having a context
  // switch during a reading of the TSC and the performance counter.
  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of the
  // TSC and the performance counter.
  static const uint64_t tsc_initial = ReadCycleCounter();
  static const uint64_t perf_counter_initial = QPCNowRaw();

  // Make another reading of the TSC and the performance counter every time
  // that this function is called.
  uint64_t tsc_now = ReadCycleCounter();
  uint64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms elapsed between the 2 readings. The first
  // time that this function is called, we don't expect this to be the case.
  // Note: The longer the elapsed time between the 2 readings is, the more
  //   accurate the computed TSC frequency will be. The 50 ms value was
  //   chosen because local benchmarks show that it allows us to get a
  //   stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //   this will never fail on systems that run XP or later.
  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  DCHECK_GE(perf_counter_now, perf_counter_initial);
  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  double elapsed_time_seconds =
      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);

  const double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  DCHECK_GE(tsc_now, tsc_initial);
  uint64_t tsc_ticks = tsc_now - tsc_initial;
  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;

  return tsc_ticks_per_second;
}
#undef ReadCycleCounter
#endif  // V8_OS_WIN

}  // namespace base
}  // namespace v8