/*
 *  include/linux/ktime.h
 *
 *  ktime_t - nanosecond-resolution time format.
 *
 *   Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  data type definitions, declarations, prototypes and macros.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *
 *  	Roman Zippel provided the ideas and primary code snippets of
 *  	the ktime_t union and further simplifications of the original
 *  	code.
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H

#include <linux/time.h>
#include <linux/jiffies.h>

/*
 * ktime_t:
 *
 * A single 64-bit variable is used to store the hrtimers'
 * internal representation of time values in scalar nanoseconds. The
 * design plays out best on 64-bit CPUs, where most conversions are
 * NOPs and most arithmetic ktime_t operations are plain arithmetic
 * operations.
 */
union ktime {
	s64	tv64;
};

typedef union ktime ktime_t;		/* Kill this */

/**
 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
 * @secs:	seconds to set
 * @nsecs:	nanoseconds to set
 *
 * Return: The ktime_t representation of the value.
 */
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	if (unlikely(secs >= KTIME_SEC_MAX))
		return (ktime_t){ .tv64 = KTIME_MAX };

	return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
}

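/*
 * Example (illustrative): a 1.5 second value can be built from a
 * seconds/nanoseconds pair; a seconds value at or above KTIME_SEC_MAX
 * saturates to KTIME_MAX instead of overflowing the multiplication:
 *
 *	ktime_t timeout = ktime_set(1, 500 * NSEC_PER_MSEC);
 */
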
/* Subtract two ktime_t variables. res = lhs - rhs: */
#define ktime_sub(lhs, rhs) \
		({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })

/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs) \
		({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs) \
		({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })

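/*
 * The caller of ktime_add_unsafe() has to detect a wrap itself, e.g. by
 * checking that the sum did not end up below either operand and clamping
 * (a minimal sketch, roughly what ktime_add_safe(), declared below, does):
 *
 *	res = ktime_add_unsafe(lhs, rhs);
 *	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
 *		res = ktime_set(KTIME_SEC_MAX, 0);
 */
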
/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
 */
#define ktime_add_ns(kt, nsval) \
		({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })

/*
 * Subtract a scalar nanosecond value from a ktime_t variable.
 * res = kt - nsval:
 */
#define ktime_sub_ns(kt, nsval) \
		({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })

/* convert a timespec to ktime_t format: */
static inline ktime_t timespec_to_ktime(struct timespec ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}

/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}

/* convert a timeval to ktime_t format: */
static inline ktime_t timeval_to_ktime(struct timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec(kt)		ns_to_timespec((kt).tv64)

/* Map the ktime_t to timespec64 conversion to ns_to_timespec64 function */
#define ktime_to_timespec64(kt)		ns_to_timespec64((kt).tv64)

/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt)		ns_to_timeval((kt).tv64)

/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
#define ktime_to_ns(kt)			((kt).tv64)

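/*
 * Example (illustrative): because the scalar storage simply holds
 * nanoseconds, the conversions above form a lossless round trip; for a
 * struct timespec ts of { .tv_sec = 2, .tv_nsec = 500 }:
 *
 *	ktime_t kt = timespec_to_ktime(ts);
 *	s64 ns = ktime_to_ns(kt);
 *	struct timespec back = ktime_to_timespec(kt);
 *
 * ns is 2000000500 and back equals ts.
 */
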
/**
 * ktime_equal - Compares two ktime_t variables to see if they are equal
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Compare two ktime_t variables.
 *
 * Return: 1 if equal.
 */
static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
{
	return cmp1.tv64 == cmp2.tv64;
}

/**
 * ktime_compare - Compares two ktime_t variables for less, greater or equal
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: ...
 *   cmp1  < cmp2: return <0
 *   cmp1 == cmp2: return 0
 *   cmp1  > cmp2: return >0
 */
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
	if (cmp1.tv64 < cmp2.tv64)
		return -1;
	if (cmp1.tv64 > cmp2.tv64)
		return 1;
	return 0;
}

/**
 * ktime_after - Compare if a ktime_t value is bigger than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened after cmp2.
 */
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) > 0;
}

/**
 * ktime_before - Compare if a ktime_t value is smaller than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened before cmp2.
 */
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) < 0;
}

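/*
 * Example (illustrative): checking a deadline against the monotonic clock,
 * where "deadline" is an assumed ktime_t computed earlier with ktime_add_*():
 *
 *	if (ktime_after(ktime_get(), deadline))
 *		return -ETIMEDOUT;
 */
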
#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * Negative divisors could cause an infinite loop,
	 * so bug out here.
	 */
	BUG_ON(div < 0);
	if (__builtin_constant_p(div) && !(div >> 32)) {
		s64 ns = kt.tv64;
		u64 tmp = ns < 0 ? -ns : ns;

		do_div(tmp, div);
		return ns < 0 ? -tmp : tmp;
	} else {
		return __ktime_divns(kt, div);
	}
}
#else /* BITS_PER_LONG < 64 */
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * 32-bit implementation cannot handle negative divisors,
	 * so catch them on 64bit as well.
	 */
	WARN_ON(div < 0);
	return kt.tv64 / div;
}
#endif

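/*
 * Example (illustrative): the helpers below pass compile-time constant
 * divisors that fit in 32 bits (NSEC_PER_USEC, NSEC_PER_MSEC), so on
 * BITS_PER_LONG < 64 a call such as
 *
 *	s64 us = ktime_divns(kt, NSEC_PER_USEC);
 *
 * takes the fast do_div() path above instead of calling __ktime_divns().
 */
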
static inline s64 ktime_to_us(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_USEC);
}

static inline s64 ktime_to_ms(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_MSEC);
}

static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_us(ktime_sub(later, earlier));
}

static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_ms(ktime_sub(later, earlier));
}

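/*
 * Example (illustrative): timing an operation with the monotonic clock from
 * linux/timekeeping.h; do_work() stands in for the code being measured:
 *
 *	ktime_t start = ktime_get();
 *	do_work();
 *	pr_debug("took %lld us\n", ktime_us_delta(ktime_get(), start));
 */
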
static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
	return ktime_add_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
	return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
}

static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
	return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
{
	return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
}

extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);

/**
 * ktime_to_timespec_cond - convert a ktime_t variable to timespec
 *			    format only if the variable contains data
 * @kt:		the ktime_t variable to convert
 * @ts:		the timespec variable to store the result in
 *
 * Return: %true if there was a successful conversion, %false if kt was 0.
 */
static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
						       struct timespec *ts)
{
	if (kt.tv64) {
		*ts = ktime_to_timespec(kt);
		return true;
	} else {
		return false;
	}
}

/**
 * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
 *			    format only if the variable contains data
 * @kt:		the ktime_t variable to convert
 * @ts:		the timespec64 variable to store the result in
 *
 * Return: %true if there was a successful conversion, %false if kt was 0.
 */
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
						       struct timespec64 *ts)
{
	if (kt.tv64) {
		*ts = ktime_to_timespec64(kt);
		return true;
	} else {
		return false;
	}
}

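/*
 * Typical use (illustrative): only report a value when the timer actually
 * holds one; "remaining" is an assumed ktime_t obtained elsewhere:
 *
 *	struct timespec64 ts;
 *
 *	if (ktime_to_timespec64_cond(remaining, &ts))
 *		pr_info("%lld.%09ld s left\n", (s64)ts.tv_sec, ts.tv_nsec);
 */
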
/*
 * The resolution of the clocks. The resolution value is returned in
 * the clock_getres() system call to give application programmers an
 * idea of the (in)accuracy of timers. Timer values are rounded up to
 * this resolution value.
 */
#define LOW_RES_NSEC		TICK_NSEC
#define KTIME_LOW_RES		(ktime_t){ .tv64 = LOW_RES_NSEC }

static inline ktime_t ns_to_ktime(u64 ns)
{
	static const ktime_t ktime_zero = { .tv64 = 0 };

	return ktime_add_ns(ktime_zero, ns);
}

static inline ktime_t ms_to_ktime(u64 ms)
{
	static const ktime_t ktime_zero = { .tv64 = 0 };

	return ktime_add_ms(ktime_zero, ms);
}

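/*
 * Example (illustrative): both helpers yield the same scalar value for the
 * same duration, here 10 ms:
 *
 *	ktime_t a = ms_to_ktime(10);
 *	ktime_t b = ns_to_ktime(10 * NSEC_PER_MSEC);
 *
 * a.tv64 and b.tv64 both hold 10000000.
 */
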
# include <linux/timekeeping.h>

#endif