Lines Matching refs: uint64_t
117 static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock, in GetCurrentTimeNanosFromKernel()
118 uint64_t *cycleclock) { in GetCurrentTimeNanosFromKernel()
122 static std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000}; in GetCurrentTimeNanosFromKernel()
124 uint64_t local_approx_syscall_time_in_cycles = // local copy in GetCurrentTimeNanosFromKernel()
128 uint64_t before_cycles; in GetCurrentTimeNanosFromKernel()
129 uint64_t after_cycles; in GetCurrentTimeNanosFromKernel()
130 uint64_t elapsed_cycles; in GetCurrentTimeNanosFromKernel()
150 last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16)); in GetCurrentTimeNanosFromKernel()
163 const uint64_t new_approximation = in GetCurrentTimeNanosFromKernel()
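The matches at lines 117-163 come from a helper that reads the kernel clock while bracketing the call with cycle-counter reads, and that appears to keep a running estimate (approx_syscall_time_in_cycles, initially 10,000 cycles) of how long such a read costs, so unusually slow reads can be detected. A minimal, self-contained sketch of that bracketing pattern follows; ReadKernelClockNs(), CycleClockNow(), the retry limit, and the growth factor are stand-ins rather than the library's actual definitions, and the comparison against last_cycleclock visible at line 150 is omitted.

    #include <time.h>

    #include <atomic>
    #include <chrono>
    #include <cstdint>

    // Stand-in for the kernel clock read (CLOCK_REALTIME in nanoseconds).
    static uint64_t ReadKernelClockNs() {
      struct timespec ts;
      clock_gettime(CLOCK_REALTIME, &ts);
      return static_cast<uint64_t>(ts.tv_sec) * 1000000000u +
             static_cast<uint64_t>(ts.tv_nsec);
    }

    // Portable stand-in for a hardware cycle counter such as rdtsc.
    static uint64_t CycleClockNow() {
      return static_cast<uint64_t>(
          std::chrono::steady_clock::now().time_since_epoch().count());
    }

    // Read the kernel clock, bracketed by cycle-counter reads, so the caller
    // learns both the time and the cycle count at which it was taken.
    static int64_t GetTimeNanosFromKernelSketch(uint64_t* cycleclock) {
      static std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};

      uint64_t local_approx =
          approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
      uint64_t before_cycles, after_cycles, elapsed_cycles;
      int64_t current_time_ns;
      for (int attempt = 0;; ++attempt) {
        before_cycles = CycleClockNow();
        current_time_ns = static_cast<int64_t>(ReadKernelClockNs());
        after_cycles = CycleClockNow();
        elapsed_cycles = after_cycles - before_cycles;
        if (elapsed_cycles < local_approx || attempt >= 3) break;
        // The read took longer than the current estimate (preemption, page
        // fault, ...): relax the estimate a little and try once more.
        local_approx += local_approx / 4;
      }
      // Remember the (possibly relaxed) cost estimate for future calls.
      approx_syscall_time_in_cycles.store(local_approx, std::memory_order_relaxed);

      *cycleclock = after_cycles;
      return current_time_ns;
    }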
189 static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) { in SeqAcquire()
190 uint64_t x = seq->fetch_add(1, std::memory_order_relaxed); in SeqAcquire()
204 static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) { in SeqRelease()
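SeqAcquire() and SeqRelease() at lines 189-204 are the writer half of a seqlock: the counter is bumped to an odd value before the shared sample is modified and set to the next even value afterwards, so lock-free readers can detect an in-progress or intervening write. A minimal sketch of that protocol; the fence placement shown is one workable choice, not a claim about the library's exact reasoning.

    #include <atomic>
    #include <cstdint>

    // Writer side of a seqlock.  Readers treat an odd counter, or a counter
    // that changed between their two reads, as "a writer was active".
    static inline uint64_t SeqAcquire(std::atomic<uint64_t>* seq) {
      uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);  // counter now odd
      // Release fence: the counter bump above cannot be reordered after the
      // shared-data stores that follow it.
      std::atomic_thread_fence(std::memory_order_release);
      return x + 2;  // the even value to publish once the write is complete
    }

    static inline void SeqRelease(std::atomic<uint64_t>* seq, uint64_t x) {
      // The release store makes the data written between Acquire and Release
      // visible to any reader that subsequently observes this even value.
      seq->store(x, std::memory_order_release);
    }

    // Usage pattern for a writer:
    //   uint64_t lock_value = SeqAcquire(&seq);   // "block" readers
    //   ... store the new sample fields ...
    //   SeqRelease(&seq, lock_value);             // publish to readers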
219 static const uint64_t kMinNSBetweenSamples = 2000 << 20;
231 ABSL_CONST_INIT static std::atomic<uint64_t> seq(0);
235 std::atomic<uint64_t> raw_ns; // raw kernel time
236 std::atomic<uint64_t> base_ns; // our estimate of time
237 std::atomic<uint64_t> base_cycles; // cycle counter reading
238 std::atomic<uint64_t> nsscaled_per_cycle; // cycle period
241 std::atomic<uint64_t> min_cycles_per_sample;
245 uint64_t raw_ns; // raw kernel time
246 uint64_t base_ns; // our estimate of time
247 uint64_t base_cycles; // cycle counter reading
248 uint64_t nsscaled_per_cycle; // cycle period
249 uint64_t min_cycles_per_sample; // approx cycles before next sample
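Lines 219-249 show the pieces of the shared sample: a minimum spacing between kernel samples (2000 << 20 ns, roughly 2.1 seconds), a seqlock counter, a sample whose fields are individual atomics so the fast path can load them without tearing, and a plain mirror of the same fields used for a consistent local copy. A sketch of that layout; the struct and variable names are assumptions where the listing does not show them.

    #include <atomic>
    #include <cstdint>

    // Approximate minimum nanoseconds between kernel samples:
    // 2000 << 20 = 2,097,152,000 ns, about 2.1 s (line 219).
    static const uint64_t kMinNSBetweenSamples = 2000 << 20;

    // Seqlock counter guarding the sample below (ABSL_CONST_INIT in the original).
    static std::atomic<uint64_t> seq(0);

    // Sample published by the slow path; atomics let fast-path readers load
    // each field individually while a writer may be updating them.
    struct TimeSampleAtomic {            // struct name is an assumption
      std::atomic<uint64_t> raw_ns{0};                 // raw kernel time
      std::atomic<uint64_t> base_ns{0};                // our estimate of time
      std::atomic<uint64_t> base_cycles{0};            // cycle counter reading
      std::atomic<uint64_t> nsscaled_per_cycle{0};     // cycle period, fixed point
      std::atomic<uint64_t> min_cycles_per_sample{0};  // cycles before next sample
    };
    static TimeSampleAtomic last_sample;  // variable name is an assumption

    // Plain (non-atomic) mirror used when taking a consistent local copy under
    // the seqlock, e.g. in the slow path.
    struct TimeSample {                  // struct name is an assumption
      uint64_t raw_ns = 0;
      uint64_t base_ns = 0;
      uint64_t base_cycles = 0;
      uint64_t nsscaled_per_cycle = 0;
      uint64_t min_cycles_per_sample = 0;  // approx cycles before next sample
    };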
302 uint64_t base_ns; in GetCurrentTimeNanos()
303 uint64_t base_cycles; in GetCurrentTimeNanos()
304 uint64_t nsscaled_per_cycle; in GetCurrentTimeNanos()
305 uint64_t min_cycles_per_sample; in GetCurrentTimeNanos()
306 uint64_t seq_read0; in GetCurrentTimeNanos()
307 uint64_t seq_read1; in GetCurrentTimeNanos()
315 uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW(); in GetCurrentTimeNanos()
351 uint64_t delta_cycles = now_cycles - base_cycles; in GetCurrentTimeNanos()
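Lines 302-351 are from the lock-free fast path of GetCurrentTimeNanos(): it reads the cycle counter (line 315, via GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW), snapshots the sequence counter and the sample fields, re-reads the counter, and, if no writer intervened and the sample is still fresh, extrapolates the current time as base_ns plus the elapsed cycles scaled by the fixed-point cycle period. A sketch of that read protocol; kScale's value and the exact memory orderings are assumptions, and the two helpers are only declared here (definitions such as the stand-ins sketched above would be needed to link).

    #include <atomic>
    #include <cstdint>

    static constexpr int kScale = 30;  // fixed-point shift; assumed value
    static std::atomic<uint64_t> seq(0);
    static struct FastPathSample {     // local stand-in for the published sample
      std::atomic<uint64_t> base_ns{0}, base_cycles{0}, nsscaled_per_cycle{0},
          min_cycles_per_sample{0};
    } last_sample;

    uint64_t CycleClockNow();               // hypothetical cycle-counter read
    int64_t GetCurrentTimeNanosSlowPath();  // kernel-backed fallback (declared only)

    int64_t GetCurrentTimeNanosFastPath() {
      uint64_t now_cycles = CycleClockNow();

      // Seqlock read: snapshot the counter, copy the sample, re-check the counter.
      uint64_t seq_read0 = seq.load(std::memory_order_acquire);
      uint64_t base_ns = last_sample.base_ns.load(std::memory_order_relaxed);
      uint64_t base_cycles = last_sample.base_cycles.load(std::memory_order_relaxed);
      uint64_t nsscaled_per_cycle =
          last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
      uint64_t min_cycles_per_sample =
          last_sample.min_cycles_per_sample.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);
      uint64_t seq_read1 = seq.load(std::memory_order_relaxed);

      // Use the sample only if no writer was active (even, unchanged counter)
      // and the sample is recent enough that extrapolation is still trusted.
      uint64_t delta_cycles = now_cycles - base_cycles;
      if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
          delta_cycles < min_cycles_per_sample) {
        // base time + elapsed cycles * cycle period (period scaled by 2^kScale).
        return static_cast<int64_t>(
            base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
      }
      return GetCurrentTimeNanosSlowPath();  // stale sample or concurrent writer
    }

Bounding delta_cycles by min_cycles_per_sample before the multiplication also keeps delta_cycles * nsscaled_per_cycle within 64 bits, which is presumably one reason the freshness check gates the extrapolation.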
362 static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) { in SafeDivideAndScale()
369 uint64_t scaled_b = b >> (kScale - safe_shift); in SafeDivideAndScale()
370 uint64_t quotient = 0; in SafeDivideAndScale()
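The fragments at lines 362-370 suggest that SafeDivideAndScale(a, b) computes roughly (a << kScale) / b without overflowing 64 bits: shift a left as far as is safe, shift b right by whatever part of kScale could not be applied to a, and guard against a zero divisor. A plausible reconstruction under that reading, with kScale's value assumed:

    #include <cstdint>

    static constexpr int kScale = 30;  // fixed-point shift; assumed value

    // Compute approximately (a << kScale) / b while avoiding 64-bit overflow:
    // apply as much of the shift to a as fits, and the remainder to b (rightward).
    static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
      // Largest safe_shift in [0, kScale] such that (a << safe_shift) keeps all bits.
      int safe_shift = kScale;
      while (((a << safe_shift) >> safe_shift) != a) {
        safe_shift--;
      }
      uint64_t scaled_b = b >> (kScale - safe_shift);
      uint64_t quotient = 0;
      if (scaled_b != 0) {
        quotient = (a << safe_shift) / scaled_b;
      }
      return quotient;
    }

For example, with a = 2e9 ns and b = 6e9 cycles the result is about 3.6e8, i.e. one third of a nanosecond per cycle expressed in fixed point scaled by 2^30.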
377 static uint64_t UpdateLastSample(
378 uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
400 static uint64_t last_now_cycles; // protected by lock in GetCurrentTimeNanosSlowPath()
401 uint64_t now_cycles; in GetCurrentTimeNanosSlowPath()
402 uint64_t now_ns = GetCurrentTimeNanosFromKernel(last_now_cycles, &now_cycles); in GetCurrentTimeNanosSlowPath()
405 uint64_t estimated_base_ns; in GetCurrentTimeNanosSlowPath()
415 uint64_t delta_cycles = now_cycles - sample.base_cycles; in GetCurrentTimeNanosSlowPath()
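Lines 377-415 are from the lock-protected slow path: it re-reads the kernel clock (remembering the cycle count of the previous read in last_now_cycles), takes a consistent copy of the published sample, and either extrapolates exactly as the fast path would (if the sample is still fresh) or hands off to UpdateLastSample() to publish a new one. A compact sketch of that control flow; the lock type and the helper names not visible in the listing are assumptions, and the helpers are declared only.

    #include <cstdint>
    #include <mutex>

    struct TimeSample {  // non-atomic copy of the published sample (see above)
      uint64_t raw_ns = 0, base_ns = 0, base_cycles = 0, nsscaled_per_cycle = 0,
               min_cycles_per_sample = 0;
    };

    static constexpr int kScale = 30;  // assumed
    static std::mutex slow_path_lock;  // the original's lock type is not shown here

    // Stand-ins for pieces shown elsewhere in the listing.
    int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
                                          uint64_t* cycleclock);
    void CopySampleUnderSeqlock(TimeSample* sample);  // hypothetical helper
    uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
                              uint64_t delta_cycles, const TimeSample* sample);

    static int64_t GetCurrentTimeNanosSlowPathSketch() {
      std::lock_guard<std::mutex> lock(slow_path_lock);

      // Re-read the kernel clock; last_now_cycles is protected by the lock.
      static uint64_t last_now_cycles = 0;
      uint64_t now_cycles;
      uint64_t now_ns = static_cast<uint64_t>(
          GetCurrentTimeNanosFromKernel(last_now_cycles, &now_cycles));
      last_now_cycles = now_cycles;

      TimeSample sample;
      CopySampleUnderSeqlock(&sample);

      uint64_t estimated_base_ns;
      uint64_t delta_cycles = now_cycles - sample.base_cycles;
      if (delta_cycles < sample.min_cycles_per_sample) {
        // Sample still fresh: extrapolate the same way the fast path does, so
        // the reported time advances smoothly.
        estimated_base_ns = sample.base_ns +
            ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
      } else {
        // Time to take a new sample and re-estimate the cycle period.
        estimated_base_ns =
            UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
      }
      return static_cast<int64_t>(estimated_base_ns);
    }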
435 static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, in UpdateLastSample()
436 uint64_t delta_cycles, in UpdateLastSample()
439 uint64_t estimated_base_ns = now_ns; in UpdateLastSample()
440 uint64_t lock_value = SeqAcquire(&seq); // acquire seqlock to block readers in UpdateLastSample()
447 sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns || in UpdateLastSample()
462 uint64_t estimated_scaled_ns; in UpdateLastSample()
475 uint64_t ns = now_ns - sample->raw_ns; in UpdateLastSample()
476 uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles); in UpdateLastSample()
478 uint64_t assumed_next_sample_delta_cycles = in UpdateLastSample()
493 uint64_t new_nsscaled_per_cycle = in UpdateLastSample()
500 uint64_t new_min_cycles_per_sample = in UpdateLastSample()
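Lines 435-500 show the core of UpdateLastSample(): under the seqlock (SeqAcquire at line 440) it either re-anchors the sample outright, one visible trigger being the raw kernel time running more than five seconds ahead of the sample (line 447), or re-estimates the cycle period from the elapsed kernel time and cycle count (lines 475-476) and derives from the new period how many cycles may pass before the next sample (line 500). A simplified sketch of that rate arithmetic; the blending policy shown is an assumption, and whatever use the original makes of estimated_base_ns in this computation is not reproduced. SafeDivideAndScale() is declared only (see the sketch above).

    #include <cstdint>

    static constexpr int kScale = 30;                         // assumed
    static const uint64_t kMinNSBetweenSamples = 2000 << 20;  // ~2.1 s (line 219)

    uint64_t SafeDivideAndScale(uint64_t a, uint64_t b);  // ~ (a << kScale) / b

    struct RateUpdate {
      uint64_t nsscaled_per_cycle;     // new cycle period (ns/cycle << kScale)
      uint64_t min_cycles_per_sample;  // cycles until the fast path must resample
    };

    // Re-estimate the cycle period from the interval since the previous sample
    // and size the next sampling window to roughly kMinNSBetweenSamples.
    static RateUpdate EstimateRate(uint64_t now_ns, uint64_t prev_raw_ns,
                                   uint64_t delta_cycles,
                                   uint64_t prev_nsscaled_per_cycle) {
      // Kernel-time advance across the interval, and the cycle period it implies.
      uint64_t ns = now_ns - prev_raw_ns;
      uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);

      // Blend the measurement into the previous estimate instead of adopting it
      // outright, so one noisy interval cannot swing the reported rate (the
      // 1/8 weighting here is illustrative only).
      uint64_t new_nsscaled_per_cycle =
          prev_nsscaled_per_cycle - prev_nsscaled_per_cycle / 8 +
          measured_nsscaled_per_cycle / 8;

      // cycles ~= ns / (ns per cycle), computed in the same fixed-point form:
      // (kMinNSBetweenSamples << kScale) / nsscaled_per_cycle.
      uint64_t new_min_cycles_per_sample =
          SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);

      return {new_nsscaled_per_cycle, new_min_cycles_per_sample};
    }

The writer stores the resulting fields between SeqAcquire(&seq) and SeqRelease(&seq, lock_value) (lines 440 and 493-500), so fast-path readers never observe a half-updated sample.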