#include <time.h>
#include <sys/time.h>

#include "fio.h"

static struct timeval genesis;
static unsigned long ns_granularity;

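/*
 * Add msec milliseconds to *tv, normalizing tv_usec into [0, 1000000).
 */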
void timeval_add_msec(struct timeval *tv, unsigned int msec)
{
	unsigned long adj_usec = 1000 * msec;

	tv->tv_usec += adj_usec;
	if (adj_usec >= 1000000) {
		unsigned long adj_sec = adj_usec / 1000000;

		tv->tv_usec -= adj_sec * 1000000;
		tv->tv_sec += adj_sec;
	}
	if (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

/*
 * busy looping version for the last few usec
 */
uint64_t usec_spin(unsigned int usec)
{
	struct timeval start;
	uint64_t t;

	fio_gettime(&start, NULL);
	while ((t = utime_since_now(&start)) < usec)
		nop;

	return t;
}

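/*
 * Sleep for approximately 'usec' microseconds. Long waits use nanosleep(),
 * with the final ns_granularity usec handled by busy-waiting in usec_spin().
 * Returns the total elapsed time in usec.
 */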
uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
{
	struct timespec req;
	struct timeval tv;
	uint64_t t = 0;

	do {
		unsigned long ts = usec;

		if (usec < ns_granularity) {
			t += usec_spin(usec);
			break;
		}

		ts = usec - ns_granularity;

		if (ts >= 1000000) {
			req.tv_sec = ts / 1000000;
			ts -= 1000000 * req.tv_sec;
		} else
			req.tv_sec = 0;

		req.tv_nsec = ts * 1000;
		fio_gettime(&tv, NULL);

		if (nanosleep(&req, NULL) < 0)
			break;

		ts = utime_since_now(&tv);
		t += ts;
		if (ts >= usec)
			break;

		usec -= ts;
	} while (!td->terminate);

	return t;
}

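/*
 * Elapsed time since the global genesis timestamp, in seconds,
 * milliseconds and microseconds respectively.
 */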
uint64_t time_since_genesis(void)
{
	return time_since_now(&genesis);
}

uint64_t mtime_since_genesis(void)
{
	return mtime_since_now(&genesis);
}

uint64_t utime_since_genesis(void)
{
	return utime_since_now(&genesis);
}

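/*
 * True while a ramp time is configured and has not yet elapsed.
 */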
bool in_ramp_time(struct thread_data *td)
{
	return td->o.ramp_time && !td->ramp_time_over;
}

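/*
 * When a child finishes its ramp period, reset the parent's stats and
 * mark its ramp time as over as well.
 */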
static void parent_update_ramp(struct thread_data *td)
{
	struct thread_data *parent = td->parent;

	if (!parent || parent->ramp_time_over)
		return;

	reset_all_stats(parent);
	parent->ramp_time_over = 1;
	td_set_runstate(parent, TD_RAMP);
}

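/*
 * Check whether the configured ramp time has elapsed. If it just did,
 * reset the stats gathered so far and switch to the TD_RAMP run state.
 */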
bool ramp_time_over(struct thread_data *td)
{
	struct timeval tv;

	if (!td->o.ramp_time || td->ramp_time_over)
		return true;

	fio_gettime(&tv, NULL);
	if (utime_since(&td->epoch, &tv) >= td->o.ramp_time) {
		td->ramp_time_over = 1;
		reset_all_stats(td);
		td_set_runstate(td, TD_RAMP);
		parent_update_ramp(td);
		return true;
	}

	return false;
}

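/*
 * Initialize the clock and estimate the worst-case overhead of a short
 * nanosleep(), which usec_sleep() uses as its spin threshold.
 */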
void fio_time_init(void)
{
	int i;

	fio_clock_init();

	/*
	 * Check the granularity of the nanosleep function
	 */
	for (i = 0; i < 10; i++) {
		struct timeval tv;
		struct timespec ts;
		unsigned long elapsed;

		fio_gettime(&tv, NULL);
		ts.tv_sec = 0;
		ts.tv_nsec = 1000;

		nanosleep(&ts, NULL);
		elapsed = utime_since_now(&tv);

		if (elapsed > ns_granularity)
			ns_granularity = elapsed;
	}
}

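/*
 * Record the global genesis (start) timestamp.
 */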
void set_genesis_time(void)
{
	fio_gettime(&genesis, NULL);
}

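/*
 * Record the per-thread epoch. If unix epoch logging is enabled, also
 * store the current wall-clock time in milliseconds.
 */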
void set_epoch_time(struct thread_data *td, int log_unix_epoch)
{
	fio_gettime(&td->epoch, NULL);
	if (log_unix_epoch) {
		struct timeval tv;
		gettimeofday(&tv, NULL);
		td->unix_epoch = (unsigned long long)(tv.tv_sec) * 1000 +
				 (unsigned long long)(tv.tv_usec) / 1000;
	}
}

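/*
 * Copy the genesis time into *t as the reported start time.
 */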
void fill_start_time(struct timeval *t)
{
	memcpy(t, &genesis, sizeof(genesis));
}