/external/perfetto/src/trace_processor/importers/ninja/ |
D | ninja_log_parser.cc |
    110  *t_start == jobs_.back().start_ms && *t_end == jobs_.back().end_ms) {  in Parse()
    126  [](const Job& x, const Job& y) { return x.start_ms < y.start_ms; });  in NotifyEndOfFile()
    158  if (cur.busy_until <= job.start_ms) {  in NotifyEndOfFile()
    180  const int64_t start_ns = job.start_ms * kMsToNs;  in NotifyEndOfFile()
    181  const int64_t dur_ns = (job.end_ms - job.start_ms) * kMsToNs;  in NotifyEndOfFile()
|
D | ninja_log_parser.h |
    58  : build_id(b), start_ms(s), end_ms(e), hash(h), names(n) {}  in Job()
    61  int64_t start_ms;  member
|
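The Perfetto ninja importer entries above turn the millisecond start/end times recorded in .ninja_log into trace nanoseconds and pack overlapping jobs onto synthetic lanes by tracking when each lane becomes free (busy_until). A minimal standalone sketch of that pattern, assuming a stripped-down Job and hypothetical Lane/AssignLanes names; the real importer additionally writes slices into the trace tables.

// Sketch only: simplified Job and lane assignment, not the real Perfetto importer.
#include <algorithm>
#include <cstdint>
#include <vector>

constexpr int64_t kMsToNs = 1000 * 1000;  // assumption: ms-to-ns scale, as in the importer

struct Job {
  int64_t start_ms;
  int64_t end_ms;
};

struct Lane {
  int64_t busy_until = 0;  // time (ms) at which this lane becomes free again
};

// Assign each job to the first lane that is free when it starts, so overlapping
// jobs land on different lanes, mirroring the busy_until check on line 158.
std::vector<size_t> AssignLanes(std::vector<Job> jobs, std::vector<Lane>* lanes) {
  std::sort(jobs.begin(), jobs.end(),
            [](const Job& x, const Job& y) { return x.start_ms < y.start_ms; });
  std::vector<size_t> lane_of_job;
  for (const Job& job : jobs) {
    size_t lane_idx = 0;
    for (; lane_idx < lanes->size(); ++lane_idx) {
      if ((*lanes)[lane_idx].busy_until <= job.start_ms) break;  // lane is free
    }
    if (lane_idx == lanes->size()) lanes->push_back(Lane{});
    (*lanes)[lane_idx].busy_until = job.end_ms;
    lane_of_job.push_back(lane_idx);

    // Slice emission would use nanoseconds, mirroring lines 180-181 above.
    const int64_t start_ns = job.start_ms * kMsToNs;
    const int64_t dur_ns = (job.end_ms - job.start_ms) * kMsToNs;
    (void)start_ns;
    (void)dur_ns;
  }
  return lane_of_job;
}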
/external/webrtc/webrtc/system_wrappers/source/ |
D | timestamp_extrapolator.cc |
    17  TimestampExtrapolator::TimestampExtrapolator(int64_t start_ms)  in TimestampExtrapolator() argument
    34  Reset(start_ms);  in TimestampExtrapolator()
    42  void TimestampExtrapolator::Reset(int64_t start_ms)  in Reset() argument
    45  _startMs = start_ms;  in Reset()
|
D | condition_variable_unittest.cc |
    194  int64_t start_ms = TickTime::MillisecondTimestamp();  in TEST() local
    197  EXPECT_LE(start_ms + kVeryShortWaitMs, end_ms)  in TEST()
    198  << "actual elapsed:" << end_ms - start_ms;  in TEST()
|
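The unit test above samples a start timestamp, waits, and then asserts a lower bound on the elapsed time. A hedged sketch of the same lower-bound check using std::chrono in place of WebRTC's TickTime; the kVeryShortWaitMs name is borrowed from the test, and the sleep stands in for the condition-variable wait.

// Sketch of the elapsed-time lower-bound assertion pattern.
#include <chrono>
#include <thread>
#include <gtest/gtest.h>

TEST(TimingSketch, WaitTakesAtLeastTheRequestedTime) {
  constexpr int64_t kVeryShortWaitMs = 10;  // name taken from the test above
  const auto start = std::chrono::steady_clock::now();
  std::this_thread::sleep_for(std::chrono::milliseconds(kVeryShortWaitMs));
  const auto end = std::chrono::steady_clock::now();
  const int64_t elapsed_ms =
      std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
  // Mirrors EXPECT_LE(start_ms + kVeryShortWaitMs, end_ms) on line 197.
  EXPECT_LE(kVeryShortWaitMs, elapsed_ms) << "actual elapsed: " << elapsed_ms;
}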
/external/webrtc/webrtc/system_wrappers/include/ |
D | timestamp_extrapolator.h |
    23  explicit TimestampExtrapolator(int64_t start_ms);
    27  void Reset(int64_t start_ms);
|
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/ |
D | audio_provider_mock.cc |
    27  int start_ms, int duration_ms,  in GetAudioSamples() argument
    34  const int start_sample = (start_ms * kAudioSampleFrequency) / 1000;  in GetAudioSamples()
|
D | audio_provider.h | 33 int start_ms, int duration_ms,
|
D | audio_provider.cc | 26 int start_ms, int duration_ms, in GetAudioSamples() argument
|
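Every micro_speech GetAudioSamples() implementation in this directory and in the board-specific directories below converts a requested start_ms/duration_ms window into sample indices using the capture rate. A small sketch of that conversion, assuming the usual 16 kHz kAudioSampleFrequency; note that the mock computes (start_ms * kAudioSampleFrequency) / 1000 while the board providers compute start_ms * (kAudioSampleFrequency / 1000), which agree only when the rate is a multiple of 1000, and that the apollo3evb provider additionally clamps negative start_ms.

// Sketch: translating a millisecond request window into sample indices.
#include <cstdint>

constexpr int kAudioSampleFrequency = 16000;  // assumption: 16 kHz capture, as in micro_speech

// Returns the first sample index covering start_ms and the number of samples
// needed for duration_ms, mirroring the GetAudioSamples() providers listed here.
inline void MsToSampleRange(int start_ms, int duration_ms,
                            int* start_sample, int* sample_count) {
  // Clamp negative starts to 0, as the apollo3evb provider does (line 506 below).
  const int clamped_start_ms = (start_ms < 0) ? 0 : start_ms;
  *start_sample = (clamped_start_ms * kAudioSampleFrequency) / 1000;
  *sample_count = (duration_ms * kAudioSampleFrequency) / 1000;
}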
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/arduino/ |
D | audio_provider.cc |
    80  int start_ms, int duration_ms,  in GetAudioSamples() argument
    99  const int start_offset = start_ms * (kAudioSampleFrequency / 1000);  in GetAudioSamples()
|
/external/tensorflow/tensorflow/core/profiler/utils/ |
D | timespan.h |
    113  inline Timespan MilliSpan(double start_ms, double end_ms) {  in MilliSpan() argument
    114  return PicoSpan(MillisToPicos(start_ms), MillisToPicos(end_ms));  in MilliSpan()
|
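MilliSpan() above builds a profiler Timespan from millisecond endpoints by converting both to picoseconds (1 ms = 10^9 ps). A hedged sketch of that unit conversion with stand-in names; the real MillisToPicos and PicoSpan helpers live elsewhere in the profiler utilities.

// Sketch of the millisecond-to-picosecond conversion behind MilliSpan().
#include <cstdint>

inline uint64_t MillisToPicosSketch(double ms) {
  return static_cast<uint64_t>(ms * 1e9);  // 1 ms = 1e9 ps
}

struct TimespanSketch {
  uint64_t begin_ps;
  uint64_t duration_ps;
};

// Assumes end_ms >= start_ms, as a span requires.
inline TimespanSketch MilliSpanSketch(double start_ms, double end_ms) {
  const uint64_t begin = MillisToPicosSketch(start_ms);
  return {begin, MillisToPicosSketch(end_ms) - begin};
}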
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/osx/ |
D | audio_provider.cc |
    108  int start_ms, int duration_ms,  in GetAudioSamples() argument
    126  const int start_offset = start_ms * (kAudioSampleFrequency / 1000);  in GetAudioSamples()
|
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/disco_f746ng/ |
D | audio_provider.cc |
    153  int start_ms, int duration_ms,  in GetAudioSamples() argument
    168  const int start_offset = start_ms * (kAudioSampleFrequency / 1000);  in GetAudioSamples()
|
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/apollo3evb/ |
D | audio_provider.cc |
    478  int start_ms, int duration_ms,  in GetAudioSamples() argument
    506  (start_ms < 0) ? 0 : start_ms * (kAudioSampleFrequency / 1000);  in GetAudioSamples()
|
/external/webrtc/talk/media/base/ |
D | rtpdump.cc |
    48  RtpDumpFileHeader::RtpDumpFileHeader(uint32_t start_ms, uint32_t s, uint16_t p)  in RtpDumpFileHeader() argument
    49  : start_sec(start_ms / 1000),  in RtpDumpFileHeader()
    50  start_usec(start_ms % 1000 * 1000),  in RtpDumpFileHeader()
|
D | rtpdump.h | 59 RtpDumpFileHeader(uint32_t start_ms, uint32_t s, uint16_t p);
|
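The rtpdump file header keeps the capture start time as separate seconds and microseconds fields, so the constructor above splits a millisecond value with / 1000 and % 1000 * 1000. A minimal sketch of that split; the field names follow the entry, and the header's other fields are omitted.

// Sketch: splitting a millisecond timestamp into the header's sec/usec fields.
#include <cstdint>

struct StartTimeFields {
  uint32_t start_sec;
  uint32_t start_usec;
};

inline StartTimeFields SplitStartMs(uint32_t start_ms) {
  return {start_ms / 1000,           // whole seconds
          start_ms % 1000 * 1000};   // leftover milliseconds expressed in microseconds
}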
/external/u-boot/cmd/ |
D | sf.c |
    73  static ulong bytes_per_second(unsigned int len, ulong start_ms)  in bytes_per_second() argument
    77  return len / (max(get_timer(start_ms) / 1024, 1UL));  in bytes_per_second()
    79  return 1024 * len / max(get_timer(start_ms), 1UL);  in bytes_per_second()
|
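bytes_per_second() above reports SPI flash throughput from a transfer length and a timer started at start_ms; the two return paths exist so that the 1024 * len multiplication cannot overflow for very large lengths. A hedged sketch of that pattern; the overflow threshold and the chrono-based timer are assumptions standing in for U-Boot's get_timer() and max() helpers.

// Sketch of the overflow-guarded throughput computation (not the exact sf.c code).
#include <algorithm>
#include <chrono>
#include <cstdint>

// Stand-in for U-Boot's get_timer(): milliseconds since an arbitrary epoch, so
// now_ms() - start_ms measures elapsed time even across unsigned wraparound.
static uint32_t now_ms() {
  using namespace std::chrono;
  return static_cast<uint32_t>(
      duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count());
}

static uint32_t BytesPerSecondSketch(uint32_t len, uint32_t start_ms) {
  const uint32_t elapsed = std::max<uint32_t>(now_ms() - start_ms, 1u);  // avoid divide by zero
  if (len >= UINT32_MAX / 1024) {
    // Very large transfers: scale the divisor instead of the length so the
    // 32-bit arithmetic stays in range (the shape of line 77 above).
    return len / std::max<uint32_t>(elapsed / 1024, 1u);
  }
  // Normal case: 1024 * len / elapsed, as on line 79 above.
  return 1024u * len / elapsed;
}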
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/sparkfun_edge/ |
D | audio_provider.cc |
    311  int start_ms, int duration_ms,  in GetAudioSamples() argument
    335  const int start_offset = start_ms * (kAudioSampleFrequency / 1000);  in GetAudioSamples()
|
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/nxp_k66f/ |
D | audio_provider.cc |
    353  int start_ms, int duration_ms,  in GetAudioSamples() argument
    368  const int start_offset = start_ms * (kAudioSampleFrequency / 1000);  in GetAudioSamples()
|
/external/tensorflow/tensorflow/core/util/ |
D | stats_calculator.cc |
    71  const double start_ms = detail.start_us.avg() / 1000.0;  in ColumnString() local
    80  InitField(stream, 17) << start_ms;  in ColumnString()
|
/external/webrtc/webrtc/test/ |
D | rtp_file_reader.cc |
    513  uint32_t CalcTimeDelta(uint32_t ts_sec, uint32_t ts_usec, uint32_t start_ms) {  in CalcTimeDelta() argument
    517  uint64_t t1_ms = static_cast<uint64_t>(start_ms);  in CalcTimeDelta()
|
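CalcTimeDelta() above compares a packet timestamp given as seconds plus microseconds with a start time in milliseconds, widening start_ms to 64 bits before subtracting. A hedged sketch of that conversion; the rounding and the clamp to zero are assumptions about behaviour the excerpt does not show.

// Sketch: delta in ms between a (sec, usec) timestamp and a millisecond start time.
#include <cstdint>

inline uint32_t CalcTimeDeltaSketch(uint32_t ts_sec, uint32_t ts_usec,
                                    uint32_t start_ms) {
  // Convert the second/microsecond pair to milliseconds in 64-bit arithmetic,
  // rounding to the nearest millisecond.
  const uint64_t t2_ms =
      (static_cast<uint64_t>(ts_sec) * 1000000 + ts_usec + 500) / 1000;
  const uint64_t t1_ms = static_cast<uint64_t>(start_ms);  // mirrors line 517 above
  // Clamp negative deltas to zero (assumption, not shown in the excerpt).
  return (t2_ms < t1_ms) ? 0u : static_cast<uint32_t>(t2_ms - t1_ms);
}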
/external/webrtc/webrtc/base/ |
D | socket_unittest.cc |
    933  uint32_t start_ms = Time();  in UdpReadyToSend() local
    936  while (start_ms + kTimeout > Time()) {  in UdpReadyToSend()
|
/external/v8/src/heap/ |
D | heap.cc |
    3566  double start_ms, double deadline_in_ms) {  in IdleNotificationEpilogue() argument
    3567  double idle_time_in_ms = deadline_in_ms - start_ms;  in IdleNotificationEpilogue()
    3625  double start_ms = MonotonicallyIncreasingTimeInMs();  in IdleNotification() local
    3626  double idle_time_in_ms = deadline_in_ms - start_ms;  in IdleNotification()
    3628  tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),  in IdleNotification()
    3639  IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);  in IdleNotification()
|
D | heap.h | 1746 GCIdleTimeHeapState heap_state, double start_ms,
|