// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/time.h"

#if V8_OS_POSIX
#include <fcntl.h>  // for O_RDONLY
#include <sys/time.h>
#include <unistd.h>
#endif
#if V8_OS_MACOSX
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <pthread.h>
#endif

#include <cstring>
#include <ostream>

#if V8_OS_WIN
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"

namespace {

#if V8_OS_MACOSX
int64_t ComputeThreadTicks() {
  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t thread_info_data;
  kern_return_t kr = thread_info(
      pthread_mach_thread_np(pthread_self()),
      THREAD_BASIC_INFO,
      reinterpret_cast<thread_info_t>(&thread_info_data),
      &thread_info_count);
  CHECK_EQ(kr, KERN_SUCCESS);

  v8::base::CheckedNumeric<int64_t> absolute_micros(
      thread_info_data.user_time.seconds +
      thread_info_data.system_time.seconds);
  absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
  absolute_micros += (thread_info_data.user_time.microseconds +
                      thread_info_data.system_time.microseconds);
  return absolute_micros.ValueOrDie();
}
#elif V8_OS_POSIX
// Helper function to get results from clock_gettime() and convert them to a
// microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC is
// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
// _POSIX_MONOTONIC_CLOCK to -1.
V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
// On AIX, clock_gettime() for CLOCK_THREAD_CPUTIME_ID has a resolution of
// only 10ms. The thread_cputime API provides the time in nanoseconds instead.
#if defined(V8_OS_AIX)
  thread_cputime_t tc;
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
    if (thread_cputime(-1, &tc) != 0) {
      UNREACHABLE();
    }
  }
#endif
  struct timespec ts;
  if (clock_gettime(clk_id, &ts) != 0) {
    UNREACHABLE();
  }
  v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
  result *= v8::base::Time::kMicrosecondsPerSecond;
#if defined(V8_OS_AIX)
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
    result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
  } else {
    result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
  }
#else
  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
#endif
  return result.ValueOrDie();
#else  // Monotonic clock not supported.
  return 0;
#endif
}

V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
  // Limit duration of timer resolution measurement to 100 ms. If we cannot
  // measure timer resolution within this time, we assume a low resolution
  // timer.
  int64_t end =
      ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
  int64_t start, delta;
  do {
    start = ClockNow(clk_id);
    // Loop until we can detect that the clock has changed. Non-HighRes timers
    // will increment in chunks, e.g. 15 ms. By spinning until we see a clock
    // change, we detect the minimum time between measurements.
    do {
      delta = ClockNow(clk_id) - start;
    } while (delta == 0);
  } while (delta > 1 && start < end);
  return delta <= 1;
}
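// For example, a clock that only advances in ~15 ms chunks produces a measured
// delta of roughly 15000 microseconds and is classified as low resolution,
// whereas a clock with microsecond (or finer) granularity yields a delta of at
// most 1 and is treated as high resolution.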

#elif V8_OS_WIN
// Returns true if QueryPerformanceCounter is known to be unreliable on the
// host CPU.
V8_INLINE bool IsQPCReliable() {
  v8::base::CPU cpu;
  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
}

// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
  DCHECK(result);
  USE(result);
  return perf_counter_now.QuadPart;
}
#endif  // V8_OS_MACOSX


}  // namespace

namespace v8 {
namespace base {

TimeDelta TimeDelta::FromDays(int days) {
  return TimeDelta(days * Time::kMicrosecondsPerDay);
}


TimeDelta TimeDelta::FromHours(int hours) {
  return TimeDelta(hours * Time::kMicrosecondsPerHour);
}


TimeDelta TimeDelta::FromMinutes(int minutes) {
  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
}


TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
}


TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
}


TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
}


int TimeDelta::InDays() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}

int TimeDelta::InHours() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}

int TimeDelta::InMinutes() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}

double TimeDelta::InSecondsF() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<double>::infinity();
  }
  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}

int64_t TimeDelta::InSeconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ / Time::kMicrosecondsPerSecond;
}

double TimeDelta::InMillisecondsF() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<double>::infinity();
  }
  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMilliseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMillisecondsRoundedUp() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
         Time::kMicrosecondsPerMillisecond;
}
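// For example, a TimeDelta of 1500 microseconds reports 1 from
// InMilliseconds() but (1500 + 999) / 1000 == 2 from InMillisecondsRoundedUp().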

int64_t TimeDelta::InMicroseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_;
}

int64_t TimeDelta::InNanoseconds() const {
  if (IsMax()) {
    // Preserve max to prevent overflow.
    return std::numeric_limits<int64_t>::max();
  }
  return delta_ * Time::kNanosecondsPerMicrosecond;
}


#if V8_OS_MACOSX

TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  DCHECK_GE(delta_, 0);
  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_MACOSX


#if V8_OS_POSIX

TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct timespec TimeDelta::ToTimespec() const {
  struct timespec ts;
  ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_POSIX


#if V8_OS_WIN

// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    LockGuard<Mutex> lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  Time NowFromSystemTime() {
    LockGuard<Mutex> lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};


static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
                          ThreadSafeInitOnceTrait>::type clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


Time Time::Now() {
  return clock.Pointer()->Now();
}


Time Time::NowFromSystemTime() {
  return clock.Pointer()->NowFromSystemTime();
}


// Time between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01).
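// That interval spans 369 years including 89 leap days, i.e. 134774 days or
// 11644473600 seconds, expressed here in microseconds.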
static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};

Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}


FILETIME Time::ToFiletime() const {
  DCHECK_GE(us_, 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}

#elif V8_OS_POSIX

Time Time::Now() {
  struct timeval tv;
  int result = gettimeofday(&tv, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  return FromTimeval(tv);
}


Time Time::NowFromSystemTime() {
  return Now();
}


Time Time::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}


struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}


Time Time::FromTimeval(struct timeval tv) {
  DCHECK_GE(tv.tv_usec, 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}


struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}

#endif  // V8_OS_WIN

// static
TimeTicks TimeTicks::HighResolutionNow() {
  // A DCHECK of TimeTicks::IsHighResolution() was removed from here, as it
  // turns out this path is used in the wild for logs and counters.
  //
  // TODO(hpayer): We may eventually want to split TimedHistograms based on
  // low-resolution clocks to avoid polluting metrics.
  return TimeTicks::Now();
}

Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}


double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}
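// For example, Time::FromJsTime(1000.5) stores 1000500 microseconds since the
// epoch, and ToJsTime() on that value returns 1000.5 again.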


std::ostream& operator<<(std::ostream& os, const Time& time) {
  return os << time.ToJsTime();
}


#if V8_OS_WIN

namespace {

// We define a wrapper to adapt between the __stdcall and __cdecl call of the
// mock function, and to avoid a static constructor.  Assigning an import to a
// function pointer directly would require setup code to fetch from the IAT.
DWORD timeGetTimeWrapper() { return timeGetTime(); }

DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;

// A structure holding the most significant bits of "last seen" and a
// "rollover" counter.
union LastTimeAndRolloversState {
  // The state as a single 32-bit opaque value.
  base::Atomic32 as_opaque_32;

  // The state as usable values.
  struct {
    // The top 8-bits of the "last" time. This is enough to check for rollovers
    // and the small bit-size means fewer CompareAndSwap operations to store
    // changes in state, which in turn makes for fewer retries.
    uint8_t last_8;
    // A count of the number of detected rollovers. Using this as bits 47-32
    // of the upper half of a 64-bit value results in a 48-bit tick counter.
    // This extends the total rollover period from about 49 days to about 8800
    // years while still allowing it to be stored with last_8 in a single
    // 32-bit value.
    uint16_t rollovers;
  } as_values;
};
base::Atomic32 g_last_time_and_rollovers = 0;
static_assert(sizeof(LastTimeAndRolloversState) <=
                  sizeof(g_last_time_and_rollovers),
              "LastTimeAndRolloversState does not fit in a single atomic word");

// We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
// because it returns the number of milliseconds since Windows has started,
// which will roll over the 32-bit value every ~49 days.  We try to track
// rollover ourselves, which works if TimeTicks::Now() is called at least every
// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
TimeTicks RolloverProtectedNow() {
  LastTimeAndRolloversState state;
  DWORD now;  // DWORD is always unsigned 32 bits.

  while (true) {
    // Fetch the "now" and "last" tick values, updating "last" with "now" and
    // incrementing the "rollovers" counter if the tick-value has wrapped back
    // around. Atomic operations ensure that both "last" and "rollovers" are
    // always updated together.
    int32_t original = base::Acquire_Load(&g_last_time_and_rollovers);
    state.as_opaque_32 = original;
    now = g_tick_function();
    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
    if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
    state.as_values.last_8 = now_8;

    // If the state hasn't changed, exit the loop.
    if (state.as_opaque_32 == original) break;

    // Save the changed state. If the existing value is unchanged from the
    // original, exit the loop.
    int32_t check = base::Release_CompareAndSwap(&g_last_time_and_rollovers,
                                                 original, state.as_opaque_32);
    if (check == original) break;

    // Another thread has done something in between so retry from the top.
  }

  return TimeTicks() +
         TimeDelta::FromMilliseconds(
             now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
}
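// For example, after one wrap of the 32-bit timeGetTime() counter (every
// 2^32 ms, roughly 49.7 days), |rollovers| is 1 and the returned tick count is
// |now| plus 4294967296 ms, so the combined 48-bit counter keeps increasing
// monotonically.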

// Discussion of tick counter options on Windows:
//
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
// expensive to retrieve. However, on older CPUs, two issues can affect its
// reliability: First, it is maintained per processor and not synchronized
// between processors. Also, the counters will change frequency due to thermal
// and power changes, and stop in some states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
// resolution (<1 microsecond) time stamp. On most hardware running today, it
// auto-detects and uses the constant-rate RDTSC counter to provide extremely
// efficient and reliable time stamps.
//
// On older CPUs where RDTSC is unreliable, it falls back to using more
// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
// PM timer, and can involve system calls; and all this is up to the HAL (with
// some help from ACPI). According to
// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
// worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In best cases, the HAL may conclude that the
// RDTSC counter runs at a constant frequency, then it uses that instead. On
// multiprocessor machines, it will try to verify that the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
// give consistent results on a multiprocessor computer, but for older CPUs it
// can be unreliable due to bugs in the BIOS or HAL.
//
// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
// milliseconds) time stamp but is comparatively less expensive to retrieve and
// more reliable. Time::EnableHighResolutionTimer() and
// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
// this timer; and also other Windows applications can alter it, affecting this
// one.

TimeTicks InitialTimeTicksNowFunction();

// See "threading notes" in InitializeTimeTicksNowFunctionPointer() for details
// on how concurrent reads/writes to these globals have been made safe.
using TimeTicksNowFunction = decltype(&TimeTicks::Now);
TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
int64_t g_qpc_ticks_per_second = 0;

// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
// what std::atomic_thread_fence does on Windows on all Intel architectures when
// the memory_order argument is anything but std::memory_order_seq_cst:
#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();

TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
  // InitializeTimeTicksNowFunctionPointer(), has happened by this point.
  ATOMIC_THREAD_FENCE(memory_order_acquire);

  DCHECK_GT(g_qpc_ticks_per_second, 0);

  // If the QPC value is below the overflow threshold, we proceed with
  // simple multiply and divide.
  if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
    return TimeDelta::FromMicroseconds(
        qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
  }
  // Otherwise, calculate microseconds in a roundabout manner to avoid
  // overflow and precision issues.
  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
  return TimeDelta::FromMicroseconds(
      (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
      ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
       g_qpc_ticks_per_second));
}
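// The threshold exists because the simple path multiplies the raw counter by
// kMicrosecondsPerSecond (1000000) before dividing; with int64_t that product
// overflows once the counter passes roughly 9.2e12 ticks, which at a 10 MHz
// QPC frequency, for example, corresponds to about ten days of uptime.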

TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }

bool IsBuggyAthlon(const CPU& cpu) {
  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
}

void InitializeTimeTicksNowFunctionPointer() {
  LARGE_INTEGER ticks_per_sec = {};
  if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;

  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
  // the low-resolution clock.
  //
  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
  // will still use the low-resolution clock. A CPU lacking a non-stop time
  // counter will cause Windows to provide an alternate QPC implementation that
  // works, but is expensive to use. Certain Athlon CPUs are known to make the
  // QPC implementation unreliable.
  //
  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
  // ~72% of users fall within this category.
  TimeTicksNowFunction now_function;
  CPU cpu;
  if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
      IsBuggyAthlon(cpu)) {
    now_function = &RolloverProtectedNow;
  } else {
    now_function = &QPCNow;
  }

  // Threading note 1: In an unlikely race condition, it's possible for two or
  // more threads to enter InitializeTimeTicksNowFunctionPointer() in parallel.
  // This is not a problem since all threads should end up writing out the same
  // values to the global variables.
  //
  // Threading note 2: A release fence is placed here to ensure, from the
  // perspective of other threads using the function pointers, that the
  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
  // are changed.
  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
  ATOMIC_THREAD_FENCE(memory_order_release);
  g_time_ticks_now_function = now_function;
}

TimeTicks InitialTimeTicksNowFunction() {
  InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function();
}

#undef ATOMIC_THREAD_FENCE

}  // namespace

// static
TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(g_time_ticks_now_function());
  DCHECK(!ticks.IsNull());
  return ticks;
}

// static
bool TimeTicks::IsHighResolution() {
  if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
    InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function == &QPCNow;
}

#else  // V8_OS_WIN

TimeTicks TimeTicks::Now() {
  int64_t ticks;
#if V8_OS_MACOSX
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
  ticks = ClockNow(CLOCK_MONOTONIC);
#else
#error platform does not implement TimeTicks::HighResolutionNow.
#endif  // V8_OS_MACOSX
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}
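// On the Mach path above, mach_absolute_time() returns ticks that become
// nanoseconds when scaled by info.numer / info.denom; dividing by
// kNanosecondsPerMicrosecond before applying the scale yields microseconds
// and, presumably, keeps the intermediate product within 64 bits.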

// static
bool TimeTicks::IsHighResolution() {
#if V8_OS_MACOSX
  return true;
#elif V8_OS_POSIX
  static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
  return is_high_resolution;
#else
  return true;
#endif
}

#endif  // V8_OS_WIN


bool ThreadTicks::IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
    defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
  return true;
#elif defined(V8_OS_WIN)
  return IsSupportedWin();
#else
  return false;
#endif
}


ThreadTicks ThreadTicks::Now() {
#if V8_OS_MACOSX
  return ThreadTicks(ComputeThreadTicks());
#elif (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_ANDROID)
  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
#elif V8_OS_SOLARIS
  return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_WIN
  return ThreadTicks::GetForThread(::GetCurrentThread());
#else
  UNREACHABLE();
#endif
}


#if V8_OS_WIN
ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
  DCHECK(IsSupported());

  // Get the number of TSC ticks used by the specified thread.
  ULONG64 thread_cycle_time = 0;
  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);

  // Get the frequency of the TSC.
  double tsc_ticks_per_second = TSCTicksPerSecond();
  if (tsc_ticks_per_second == 0)
    return ThreadTicks();

  // Return the CPU time of the specified thread.
  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
  return ThreadTicks(
      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
}

// static
bool ThreadTicks::IsSupportedWin() {
  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
                             !IsQPCReliable();
  return is_supported;
}

// static
void ThreadTicks::WaitUntilInitializedWin() {
  while (TSCTicksPerSecond() == 0)
    ::Sleep(10);
}

double ThreadTicks::TSCTicksPerSecond() {
  DCHECK(IsSupported());

  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
  // frequency, because there is no guarantee that the TSC frequency is equal to
  // the performance counter frequency.

  // The TSC frequency is cached in a static variable because it takes some time
  // to compute it.
  static double tsc_ticks_per_second = 0;
  if (tsc_ticks_per_second != 0)
    return tsc_ticks_per_second;

  // Increase the thread priority to reduce the chances of having a context
  // switch during a reading of the TSC and the performance counter.
  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of the
  // TSC and the performance counter.
  static const uint64_t tsc_initial = __rdtsc();
  static const uint64_t perf_counter_initial = QPCNowRaw();

  // Make another reading of the TSC and the performance counter every time
  // that this function is called.
  uint64_t tsc_now = __rdtsc();
  uint64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms have elapsed between the two readings. The
  // first time that this function is called, we don't expect this to be the
  // case.
  // Note: The longer the elapsed time between the two readings is, the more
  //   accurate the computed TSC frequency will be. The 50 ms value was
  //   chosen because local benchmarks show that it allows us to get a
  //   stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //   this will never fail on systems that run XP or later.
  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  DCHECK_GE(perf_counter_now, perf_counter_initial);
  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  double elapsed_time_seconds =
      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);

  const double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  DCHECK_GE(tsc_now, tsc_initial);
  uint64_t tsc_ticks = tsc_now - tsc_initial;
  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;

  return tsc_ticks_per_second;
}
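// Note that the first call typically returns 0 because the two readings are
// taken back to back; callers that need a value immediately can spin via
// WaitUntilInitializedWin() until a later call observes a long enough
// measurement interval.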
#endif  // V8_OS_WIN

}  // namespace base
}  // namespace v8