• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/platform/time.h"
6 
7 #if V8_OS_POSIX
8 #include <fcntl.h>  // for O_RDONLY
9 #include <sys/time.h>
10 #include <unistd.h>
11 #endif
12 #if V8_OS_MACOSX
13 #include <mach/mach.h>
14 #include <mach/mach_time.h>
15 #include <pthread.h>
16 #endif
17 
18 #include <cstring>
19 #include <ostream>
20 
21 #if V8_OS_WIN
22 #include "src/base/lazy-instance.h"
23 #include "src/base/win32-headers.h"
24 #endif
25 #include "src/base/cpu.h"
26 #include "src/base/logging.h"
27 #include "src/base/platform/platform.h"
28 
29 #if V8_OS_STARBOARD
30 #include "starboard/time.h"
31 #endif
32 
33 namespace {
34 
35 #if V8_OS_MACOSX
ComputeThreadTicks()36 int64_t ComputeThreadTicks() {
37   mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
38   thread_basic_info_data_t thread_info_data;
39   kern_return_t kr = thread_info(
40       pthread_mach_thread_np(pthread_self()),
41       THREAD_BASIC_INFO,
42       reinterpret_cast<thread_info_t>(&thread_info_data),
43       &thread_info_count);
44   CHECK_EQ(kr, KERN_SUCCESS);
45 
46   // We can add the seconds into a {int64_t} without overflow.
47   CHECK_LE(thread_info_data.user_time.seconds,
48            std::numeric_limits<int64_t>::max() -
49                thread_info_data.system_time.seconds);
50   int64_t seconds =
51       thread_info_data.user_time.seconds + thread_info_data.system_time.seconds;
52   // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
53   // in [0, 2 * kMicrosecondsPerSecond) must result in a valid {int64_t}.
54   static constexpr int64_t kSecondsLimit =
55       (std::numeric_limits<int64_t>::max() /
56        v8::base::Time::kMicrosecondsPerSecond) -
57       2;
58   CHECK_GT(kSecondsLimit, seconds);
59   int64_t micros = seconds * v8::base::Time::kMicrosecondsPerSecond;
60   micros += (thread_info_data.user_time.microseconds +
61              thread_info_data.system_time.microseconds);
62   return micros;
63 }
64 #elif V8_OS_POSIX
65 // Helper function to get results from clock_gettime() and convert to a
66 // microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
67 // on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
68 // _POSIX_MONOTONIC_CLOCK to -1.
69 V8_INLINE int64_t ClockNow(clockid_t clk_id) {
70 #if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
71   defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
72 // On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
73 // resolution of 10ms. thread_cputime API provides the time in ns
74 #if defined(V8_OS_AIX)
75   thread_cputime_t tc;
76   if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
77 #if defined(__PASE__)  // CLOCK_THREAD_CPUTIME_ID clock not supported on IBMi
78     return 0;
79 #endif
80     if (thread_cputime(-1, &tc) != 0) {
81       UNREACHABLE();
82     }
83   }
84 #endif
85   struct timespec ts;
86   if (clock_gettime(clk_id, &ts) != 0) {
87     UNREACHABLE();
88   }
89   // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
90   // in [0, kMicrosecondsPerSecond) must result in a valid {int64_t}.
91   static constexpr int64_t kSecondsLimit =
92       (std::numeric_limits<int64_t>::max() /
93        v8::base::Time::kMicrosecondsPerSecond) -
94       1;
95   CHECK_GT(kSecondsLimit, ts.tv_sec);
96   int64_t result = int64_t{ts.tv_sec} * v8::base::Time::kMicrosecondsPerSecond;
97 #if defined(V8_OS_AIX)
98   if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
99     result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
100   } else {
101     result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
102   }
103 #else
104   result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
105 #endif
106   return result;
107 #else  // Monotonic clock not supported.
108   return 0;
109 #endif
110 }
111 
112 V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
113   // Limit duration of timer resolution measurement to 100 ms. If we cannot
114   // measure timer resoltuion within this time, we assume a low resolution
115   // timer.
116   int64_t end =
117       ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
118   int64_t start, delta;
119   do {
120     start = ClockNow(clk_id);
121     // Loop until we can detect that the clock has changed. Non-HighRes timers
122     // will increment in chunks, i.e. 15ms. By spinning until we see a clock
123     // change, we detect the minimum time between measurements.
124     do {
125       delta = ClockNow(clk_id) - start;
126     } while (delta == 0);
127   } while (delta > 1 && start < end);
128   return delta <= 1;
129 }
130 
131 #elif V8_OS_WIN
132 // Returns the current value of the performance counter.
133 V8_INLINE uint64_t QPCNowRaw() {
134   LARGE_INTEGER perf_counter_now = {};
135   // According to the MSDN documentation for QueryPerformanceCounter(), this
136   // will never fail on systems that run XP or later.
137   // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
138   BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
139   DCHECK(result);
140   USE(result);
141   return perf_counter_now.QuadPart;
142 }
143 #endif  // V8_OS_MACOSX
144 
145 
146 }  // namespace
147 
148 namespace v8 {
149 namespace base {
150 
InDays() const151 int TimeDelta::InDays() const {
152   if (IsMax()) {
153     // Preserve max to prevent overflow.
154     return std::numeric_limits<int>::max();
155   }
156   return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
157 }
158 
InHours() const159 int TimeDelta::InHours() const {
160   if (IsMax()) {
161     // Preserve max to prevent overflow.
162     return std::numeric_limits<int>::max();
163   }
164   return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
165 }
166 
InMinutes() const167 int TimeDelta::InMinutes() const {
168   if (IsMax()) {
169     // Preserve max to prevent overflow.
170     return std::numeric_limits<int>::max();
171   }
172   return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
173 }
174 
InSecondsF() const175 double TimeDelta::InSecondsF() const {
176   if (IsMax()) {
177     // Preserve max to prevent overflow.
178     return std::numeric_limits<double>::infinity();
179   }
180   return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
181 }
182 
InSeconds() const183 int64_t TimeDelta::InSeconds() const {
184   if (IsMax()) {
185     // Preserve max to prevent overflow.
186     return std::numeric_limits<int64_t>::max();
187   }
188   return delta_ / Time::kMicrosecondsPerSecond;
189 }
190 
InMillisecondsF() const191 double TimeDelta::InMillisecondsF() const {
192   if (IsMax()) {
193     // Preserve max to prevent overflow.
194     return std::numeric_limits<double>::infinity();
195   }
196   return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
197 }
198 
InMilliseconds() const199 int64_t TimeDelta::InMilliseconds() const {
200   if (IsMax()) {
201     // Preserve max to prevent overflow.
202     return std::numeric_limits<int64_t>::max();
203   }
204   return delta_ / Time::kMicrosecondsPerMillisecond;
205 }
206 
InMillisecondsRoundedUp() const207 int64_t TimeDelta::InMillisecondsRoundedUp() const {
208   if (IsMax()) {
209     // Preserve max to prevent overflow.
210     return std::numeric_limits<int64_t>::max();
211   }
212   return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
213          Time::kMicrosecondsPerMillisecond;
214 }
215 
InMicroseconds() const216 int64_t TimeDelta::InMicroseconds() const {
217   if (IsMax()) {
218     // Preserve max to prevent overflow.
219     return std::numeric_limits<int64_t>::max();
220   }
221   return delta_;
222 }
223 
InNanoseconds() const224 int64_t TimeDelta::InNanoseconds() const {
225   if (IsMax()) {
226     // Preserve max to prevent overflow.
227     return std::numeric_limits<int64_t>::max();
228   }
229   return delta_ * Time::kNanosecondsPerMicrosecond;
230 }
231 
232 
233 #if V8_OS_MACOSX
234 
FromMachTimespec(struct mach_timespec ts)235 TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
236   DCHECK_GE(ts.tv_nsec, 0);
237   DCHECK_LT(ts.tv_nsec,
238             static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
239   return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
240                    ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
241 }
242 
243 
ToMachTimespec() const244 struct mach_timespec TimeDelta::ToMachTimespec() const {
245   struct mach_timespec ts;
246   DCHECK_GE(delta_, 0);
247   ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
248   ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
249       Time::kNanosecondsPerMicrosecond;
250   return ts;
251 }
252 
253 #endif  // V8_OS_MACOSX
254 
255 
256 #if V8_OS_POSIX
257 
FromTimespec(struct timespec ts)258 TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
259   DCHECK_GE(ts.tv_nsec, 0);
260   DCHECK_LT(ts.tv_nsec,
261             static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
262   return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
263                    ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
264 }
265 
266 
ToTimespec() const267 struct timespec TimeDelta::ToTimespec() const {
268   struct timespec ts;
269   ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
270   ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
271       Time::kNanosecondsPerMicrosecond;
272   return ts;
273 }
274 
275 #endif  // V8_OS_POSIX
276 
277 
278 #if V8_OS_WIN
279 
280 // We implement time using the high-resolution timers so that we can get
281 // timeouts which are smaller than 10-15ms. To avoid any drift, we
282 // periodically resync the internal clock to the system clock.
283 class Clock final {
284  public:
Clock()285   Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
286 
Now()287   Time Now() {
288     // Time between resampling the un-granular clock for this API (1 minute).
289     const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
290 
291     MutexGuard lock_guard(&mutex_);
292 
293     // Determine current time and ticks.
294     TimeTicks ticks = GetSystemTicks();
295     Time time = GetSystemTime();
296 
297     // Check if we need to synchronize with the system clock due to a backwards
298     // time change or the amount of time elapsed.
299     TimeDelta elapsed = ticks - initial_ticks_;
300     if (time < initial_time_ || elapsed > kMaxElapsedTime) {
301       initial_ticks_ = ticks;
302       initial_time_ = time;
303       return time;
304     }
305 
306     return initial_time_ + elapsed;
307   }
308 
NowFromSystemTime()309   Time NowFromSystemTime() {
310     MutexGuard lock_guard(&mutex_);
311     initial_ticks_ = GetSystemTicks();
312     initial_time_ = GetSystemTime();
313     return initial_time_;
314   }
315 
316  private:
GetSystemTicks()317   static TimeTicks GetSystemTicks() {
318     return TimeTicks::Now();
319   }
320 
GetSystemTime()321   static Time GetSystemTime() {
322     FILETIME ft;
323     ::GetSystemTimeAsFileTime(&ft);
324     return Time::FromFiletime(ft);
325   }
326 
327   TimeTicks initial_ticks_;
328   Time initial_time_;
329   Mutex mutex_;
330 };
331 
332 namespace {
333 DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock)
334 }  // namespace
335 
Now()336 Time Time::Now() { return GetClock()->Now(); }
337 
NowFromSystemTime()338 Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }
339 
// Microseconds between the Windows epoch (1601-01-01) and the standard
// (Unix) epoch (1970-01-01).
static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
342 
FromFiletime(FILETIME ft)343 Time Time::FromFiletime(FILETIME ft) {
344   if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
345     return Time();
346   }
347   if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
348       ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
349     return Max();
350   }
351   int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
352                 (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
353   return Time(us - kTimeToEpochInMicroseconds);
354 }
355 
356 
ToFiletime() const357 FILETIME Time::ToFiletime() const {
358   DCHECK_GE(us_, 0);
359   FILETIME ft;
360   if (IsNull()) {
361     ft.dwLowDateTime = 0;
362     ft.dwHighDateTime = 0;
363     return ft;
364   }
365   if (IsMax()) {
366     ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
367     ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
368     return ft;
369   }
370   uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
371   ft.dwLowDateTime = static_cast<DWORD>(us);
372   ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
373   return ft;
374 }
375 
376 #elif V8_OS_POSIX
377 
Now()378 Time Time::Now() {
379   struct timeval tv;
380   int result = gettimeofday(&tv, nullptr);
381   DCHECK_EQ(0, result);
382   USE(result);
383   return FromTimeval(tv);
384 }
385 
386 
NowFromSystemTime()387 Time Time::NowFromSystemTime() {
388   return Now();
389 }
390 
391 
FromTimespec(struct timespec ts)392 Time Time::FromTimespec(struct timespec ts) {
393   DCHECK_GE(ts.tv_nsec, 0);
394   DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
395   if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
396     return Time();
397   }
398   if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
399       ts.tv_sec == std::numeric_limits<time_t>::max()) {
400     return Max();
401   }
402   return Time(ts.tv_sec * kMicrosecondsPerSecond +
403               ts.tv_nsec / kNanosecondsPerMicrosecond);
404 }
405 
406 
ToTimespec() const407 struct timespec Time::ToTimespec() const {
408   struct timespec ts;
409   if (IsNull()) {
410     ts.tv_sec = 0;
411     ts.tv_nsec = 0;
412     return ts;
413   }
414   if (IsMax()) {
415     ts.tv_sec = std::numeric_limits<time_t>::max();
416     ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
417     return ts;
418   }
419   ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
420   ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
421   return ts;
422 }
423 
424 
FromTimeval(struct timeval tv)425 Time Time::FromTimeval(struct timeval tv) {
426   DCHECK_GE(tv.tv_usec, 0);
427   DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
428   if (tv.tv_usec == 0 && tv.tv_sec == 0) {
429     return Time();
430   }
431   if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
432       tv.tv_sec == std::numeric_limits<time_t>::max()) {
433     return Max();
434   }
435   return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
436 }
437 
438 
ToTimeval() const439 struct timeval Time::ToTimeval() const {
440   struct timeval tv;
441   if (IsNull()) {
442     tv.tv_sec = 0;
443     tv.tv_usec = 0;
444     return tv;
445   }
446   if (IsMax()) {
447     tv.tv_sec = std::numeric_limits<time_t>::max();
448     tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
449     return tv;
450   }
451   tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
452   tv.tv_usec = us_ % kMicrosecondsPerSecond;
453   return tv;
454 }
455 
456 #elif V8_OS_STARBOARD
457 
Now()458 Time Time::Now() { return Time(SbTimeToPosix(SbTimeGetNow())); }
459 
NowFromSystemTime()460 Time Time::NowFromSystemTime() { return Now(); }
461 
462 #endif  // V8_OS_STARBOARD
463 
464 // static
HighResolutionNow()465 TimeTicks TimeTicks::HighResolutionNow() {
466   // a DCHECK of TimeTicks::IsHighResolution() was removed from here
467   // as it turns out this path is used in the wild for logs and counters.
468   //
469   // TODO(hpayer) We may eventually want to split TimedHistograms based
470   // on low resolution clocks to avoid polluting metrics
471   return TimeTicks::Now();
472 }
473 
FromJsTime(double ms_since_epoch)474 Time Time::FromJsTime(double ms_since_epoch) {
475   // The epoch is a valid time, so this constructor doesn't interpret
476   // 0 as the null time.
477   if (ms_since_epoch == std::numeric_limits<double>::max()) {
478     return Max();
479   }
480   return Time(
481       static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
482 }
483 
484 
ToJsTime() const485 double Time::ToJsTime() const {
486   if (IsNull()) {
487     // Preserve 0 so the invalid result doesn't depend on the platform.
488     return 0;
489   }
490   if (IsMax()) {
491     // Preserve max without offset to prevent overflow.
492     return std::numeric_limits<double>::max();
493   }
494   return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
495 }
496 
497 
operator <<(std::ostream & os,const Time & time)498 std::ostream& operator<<(std::ostream& os, const Time& time) {
499   return os << time.ToJsTime();
500 }
501 
502 
503 #if V8_OS_WIN
504 
505 namespace {
506 
507 // We define a wrapper to adapt between the __stdcall and __cdecl call of the
508 // mock function, and to avoid a static constructor.  Assigning an import to a
509 // function pointer directly would require setup code to fetch from the IAT.
timeGetTimeWrapper()510 DWORD timeGetTimeWrapper() { return timeGetTime(); }
511 
512 DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
513 
// A structure holding the most significant bits of "last seen" and a
// "rollover" counter.
union LastTimeAndRolloversState {
  // The state as a single 32-bit opaque value.
  int32_t as_opaque_32;

  // The state as usable values.
  struct {
    // The top 8-bits of the "last" time. This is enough to check for rollovers
    // and the small bit-size means fewer CompareAndSwap operations to store
    // changes in state, which in turn makes for fewer retries.
    uint8_t last_8;
    // A count of the number of detected rollovers. Using this as bits 47-32
    // of the upper half of a 64-bit value results in a 48-bit tick counter.
    // This extends the total rollover period from about 49 days to about 8800
    // years while still allowing it to be stored with last_8 in a single
    // 32-bit value.
    uint16_t rollovers;
  } as_values;
};
std::atomic<int32_t> g_last_time_and_rollovers{0};
static_assert(sizeof(LastTimeAndRolloversState) <=
                  sizeof(g_last_time_and_rollovers),
              "LastTimeAndRolloversState does not fit in a single atomic word");
538 
539 // We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
540 // because it returns the number of milliseconds since Windows has started,
541 // which will roll over the 32-bit value every ~49 days.  We try to track
542 // rollover ourselves, which works if TimeTicks::Now() is called at least every
543 // 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
RolloverProtectedNow()544 TimeTicks RolloverProtectedNow() {
545   LastTimeAndRolloversState state;
546   DWORD now;  // DWORD is always unsigned 32 bits.
547 
548   // Fetch the "now" and "last" tick values, updating "last" with "now" and
549   // incrementing the "rollovers" counter if the tick-value has wrapped back
550   // around. Atomic operations ensure that both "last" and "rollovers" are
551   // always updated together.
552   int32_t original = g_last_time_and_rollovers.load(std::memory_order_acquire);
553   while (true) {
554     state.as_opaque_32 = original;
555     now = g_tick_function();
556     uint8_t now_8 = static_cast<uint8_t>(now >> 24);
557     if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
558     state.as_values.last_8 = now_8;
559 
560     // If the state hasn't changed, exit the loop.
561     if (state.as_opaque_32 == original) break;
562 
563     // Save the changed state. If the existing value is unchanged from the
564     // original, exit the loop.
565     if (g_last_time_and_rollovers.compare_exchange_weak(
566             original, state.as_opaque_32, std::memory_order_acq_rel)) {
567       break;
568     }
569 
570     // Another thread has done something in between so retry from the top.
571     // {original} has been updated by the {compare_exchange_weak}.
572   }
573 
574   return TimeTicks() +
575          TimeDelta::FromMilliseconds(
576              now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
577 }
578 
579 // Discussion of tick counter options on Windows:
580 //
581 // (1) CPU cycle counter. (Retrieved via RDTSC)
582 // The CPU counter provides the highest resolution time stamp and is the least
583 // expensive to retrieve. However, on older CPUs, two issues can affect its
584 // reliability: First it is maintained per processor and not synchronized
585 // between processors. Also, the counters will change frequency due to thermal
586 // and power changes, and stop in some states.
587 //
588 // (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
589 // resolution (<1 microsecond) time stamp. On most hardware running today, it
590 // auto-detects and uses the constant-rate RDTSC counter to provide extremely
591 // efficient and reliable time stamps.
592 //
593 // On older CPUs where RDTSC is unreliable, it falls back to using more
594 // expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
595 // PM timer, and can involve system calls; and all this is up to the HAL (with
596 // some help from ACPI). According to
597 // http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
598 // worst case, it gets the counter from the rollover interrupt on the
599 // programmable interrupt timer. In best cases, the HAL may conclude that the
600 // RDTSC counter runs at a constant frequency, then it uses that instead. On
601 // multiprocessor machines, it will try to verify the values returned from
602 // RDTSC on each processor are consistent with each other, and apply a handful
603 // of workarounds for known buggy hardware. In other words, QPC is supposed to
604 // give consistent results on a multiprocessor computer, but for older CPUs it
// can be unreliable due to bugs in BIOS or HAL.
606 //
607 // (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
608 // milliseconds) time stamp but is comparatively less expensive to retrieve and
609 // more reliable. Time::EnableHighResolutionTimer() and
610 // Time::ActivateHighResolutionTimer() can be called to alter the resolution of
611 // this timer; and also other Windows applications can alter it, affecting this
612 // one.
613 
614 TimeTicks InitialTimeTicksNowFunction();
615 
616 // See "threading notes" in InitializeNowFunctionPointer() for details on how
617 // concurrent reads/writes to these globals has been made safe.
618 using TimeTicksNowFunction = decltype(&TimeTicks::Now);
619 TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
620 int64_t g_qpc_ticks_per_second = 0;
621 
622 // As of January 2015, use of <atomic> is forbidden in Chromium code. This is
623 // what std::atomic_thread_fence does on Windows on all Intel architectures when
624 // the memory_order argument is anything but std::memory_order_seq_cst:
625 #define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
626 
QPCValueToTimeDelta(LONGLONG qpc_value)627 TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
628   // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
629   // InitializeNowFunctionPointer(), has happened by this point.
630   ATOMIC_THREAD_FENCE(memory_order_acquire);
631 
632   DCHECK_GT(g_qpc_ticks_per_second, 0);
633 
634   // If the QPC Value is below the overflow threshold, we proceed with
635   // simple multiply and divide.
636   if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
637     return TimeDelta::FromMicroseconds(
638         qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
639   }
640   // Otherwise, calculate microseconds in a round about manner to avoid
641   // overflow and precision issues.
642   int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
643   int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
644   return TimeDelta::FromMicroseconds(
645       (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
646       ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
647        g_qpc_ticks_per_second));
648 }
649 
QPCNow()650 TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
651 
InitializeTimeTicksNowFunctionPointer()652 void InitializeTimeTicksNowFunctionPointer() {
653   LARGE_INTEGER ticks_per_sec = {};
654   if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;
655 
656   // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
657   // the low-resolution clock.
658   //
659   // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
660   // will still use the low-resolution clock. A CPU lacking a non-stop time
661   // counter will cause Windows to provide an alternate QPC implementation that
662   // works, but is expensive to use. Certain Athlon CPUs are known to make the
663   // QPC implementation unreliable.
664   //
665   // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
666   // ~72% of users fall within this category.
667   TimeTicksNowFunction now_function;
668   CPU cpu;
669   if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter()) {
670     now_function = &RolloverProtectedNow;
671   } else {
672     now_function = &QPCNow;
673   }
674 
675   // Threading note 1: In an unlikely race condition, it's possible for two or
676   // more threads to enter InitializeNowFunctionPointer() in parallel. This is
677   // not a problem since all threads should end up writing out the same values
678   // to the global variables.
679   //
680   // Threading note 2: A release fence is placed here to ensure, from the
681   // perspective of other threads using the function pointers, that the
682   // assignment to |g_qpc_ticks_per_second| happens before the function pointers
683   // are changed.
684   g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
685   ATOMIC_THREAD_FENCE(memory_order_release);
686   g_time_ticks_now_function = now_function;
687 }
688 
InitialTimeTicksNowFunction()689 TimeTicks InitialTimeTicksNowFunction() {
690   InitializeTimeTicksNowFunctionPointer();
691   return g_time_ticks_now_function();
692 }
693 
694 #undef ATOMIC_THREAD_FENCE
695 
696 }  // namespace
697 
698 // static
Now()699 TimeTicks TimeTicks::Now() {
700   // Make sure we never return 0 here.
701   TimeTicks ticks(g_time_ticks_now_function());
702   DCHECK(!ticks.IsNull());
703   return ticks;
704 }
705 
706 // static
IsHighResolution()707 bool TimeTicks::IsHighResolution() {
708   if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
709     InitializeTimeTicksNowFunctionPointer();
710   return g_time_ticks_now_function == &QPCNow;
711 }
712 
713 #else  // V8_OS_WIN
714 
Now()715 TimeTicks TimeTicks::Now() {
716   int64_t ticks;
717 #if V8_OS_MACOSX
718   static struct mach_timebase_info info;
719   if (info.denom == 0) {
720     kern_return_t result = mach_timebase_info(&info);
721     DCHECK_EQ(KERN_SUCCESS, result);
722     USE(result);
723   }
724   ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
725            info.numer / info.denom);
726 #elif V8_OS_SOLARIS
727   ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
728 #elif V8_OS_POSIX
729   ticks = ClockNow(CLOCK_MONOTONIC);
730 #elif V8_OS_STARBOARD
731   ticks = SbTimeGetMonotonicNow();
732 #else
733 #error platform does not implement TimeTicks::HighResolutionNow.
734 #endif  // V8_OS_MACOSX
735   // Make sure we never return 0 here.
736   return TimeTicks(ticks + 1);
737 }
738 
739 // static
IsHighResolution()740 bool TimeTicks::IsHighResolution() {
741 #if V8_OS_MACOSX
742   return true;
743 #elif V8_OS_POSIX
744   static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
745   return is_high_resolution;
746 #else
747   return true;
748 #endif
749 }
750 
751 #endif  // V8_OS_WIN
752 
753 
IsSupported()754 bool ThreadTicks::IsSupported() {
755 #if V8_OS_STARBOARD
756 #if SB_API_VERSION >= 12
757   return SbTimeIsTimeThreadNowSupported();
758 #elif SB_HAS(TIME_THREAD_NOW)
759   return true;
760 #else
761   return false;
762 #endif
763 #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
764     defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
765   return true;
766 #elif defined(V8_OS_WIN)
767   return IsSupportedWin();
768 #else
769   return false;
770 #endif
771 }
772 
773 
Now()774 ThreadTicks ThreadTicks::Now() {
775 #if V8_OS_STARBOARD
776 #if SB_API_VERSION >= 12
777   if (SbTimeIsTimeThreadNowSupported())
778     return ThreadTicks(SbTimeGetMonotonicThreadNow());
779   UNREACHABLE();
780 #elif SB_HAS(TIME_THREAD_NOW)
781   return ThreadTicks(SbTimeGetMonotonicThreadNow());
782 #else
783   UNREACHABLE();
784 #endif
785 #elif V8_OS_MACOSX
786   return ThreadTicks(ComputeThreadTicks());
787 #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
788   defined(V8_OS_ANDROID)
789   return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
790 #elif V8_OS_SOLARIS
791   return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
792 #elif V8_OS_WIN
793   return ThreadTicks::GetForThread(::GetCurrentThread());
794 #else
795   UNREACHABLE();
796 #endif
797 }
798 
799 
800 #if V8_OS_WIN
GetForThread(const HANDLE & thread_handle)801 ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
802   DCHECK(IsSupported());
803 
804   // Get the number of TSC ticks used by the current thread.
805   ULONG64 thread_cycle_time = 0;
806   ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
807 
808   // Get the frequency of the TSC.
809   double tsc_ticks_per_second = TSCTicksPerSecond();
810   if (tsc_ticks_per_second == 0)
811     return ThreadTicks();
812 
813   // Return the CPU time of the current thread.
814   double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
815   return ThreadTicks(
816       static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
817 }
818 
819 // static
IsSupportedWin()820 bool ThreadTicks::IsSupportedWin() {
821   static bool is_supported = base::CPU().has_non_stop_time_stamp_counter();
822   return is_supported;
823 }
824 
825 // static
WaitUntilInitializedWin()826 void ThreadTicks::WaitUntilInitializedWin() {
827   while (TSCTicksPerSecond() == 0)
828     ::Sleep(10);
829 }
830 
831 #ifdef V8_HOST_ARCH_ARM64
832 #define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
833 #else
834 #define ReadCycleCounter() __rdtsc()
835 #endif
836 
TSCTicksPerSecond()837 double ThreadTicks::TSCTicksPerSecond() {
838   DCHECK(IsSupported());
839 
840   // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
841   // frequency, because there is no guarantee that the TSC frequency is equal to
842   // the performance counter frequency.
843 
844   // The TSC frequency is cached in a static variable because it takes some time
845   // to compute it.
846   static double tsc_ticks_per_second = 0;
847   if (tsc_ticks_per_second != 0)
848     return tsc_ticks_per_second;
849 
850   // Increase the thread priority to reduces the chances of having a context
851   // switch during a reading of the TSC and the performance counter.
852   int previous_priority = ::GetThreadPriority(::GetCurrentThread());
853   ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
854 
855   // The first time that this function is called, make an initial reading of the
856   // TSC and the performance counter.
857   static const uint64_t tsc_initial = ReadCycleCounter();
858   static const uint64_t perf_counter_initial = QPCNowRaw();
859 
860   // Make a another reading of the TSC and the performance counter every time
861   // that this function is called.
862   uint64_t tsc_now = ReadCycleCounter();
863   uint64_t perf_counter_now = QPCNowRaw();
864 
865   // Reset the thread priority.
866   ::SetThreadPriority(::GetCurrentThread(), previous_priority);
867 
868   // Make sure that at least 50 ms elapsed between the 2 readings. The first
869   // time that this function is called, we don't expect this to be the case.
870   // Note: The longer the elapsed time between the 2 readings is, the more
871   //   accurate the computed TSC frequency will be. The 50 ms value was
872   //   chosen because local benchmarks show that it allows us to get a
873   //   stddev of less than 1 tick/us between multiple runs.
874   // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
875   //   this will never fail on systems that run XP or later.
876   //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
877   LARGE_INTEGER perf_counter_frequency = {};
878   ::QueryPerformanceFrequency(&perf_counter_frequency);
879   DCHECK_GE(perf_counter_now, perf_counter_initial);
880   uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
881   double elapsed_time_seconds =
882       perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
883 
884   const double kMinimumEvaluationPeriodSeconds = 0.05;
885   if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
886     return 0;
887 
888   // Compute the frequency of the TSC.
889   DCHECK_GE(tsc_now, tsc_initial);
890   uint64_t tsc_ticks = tsc_now - tsc_initial;
891   tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
892 
893   return tsc_ticks_per_second;
894 }
895 #undef ReadCycleCounter
896 #endif  // V8_OS_WIN
897 
898 }  // namespace base
899 }  // namespace v8
900