/*
 *  include/linux/ktime.h
 *
 *  ktime_t - nanosecond-resolution time format.
 *
 *   Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  data type definitions, declarations, prototypes and macros.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *
 *  	Roman Zippel provided the ideas and primary code snippets of
 *  	the ktime_t union and further simplifications of the original
 *  	code.
 *
 *  For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H

#include <linux/time.h>
#include <linux/jiffies.h>
#include <asm/bug.h>

/* Nanosecond scalar representation for kernel time values */
typedef s64	ktime_t;

/**
 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
 * @secs:	seconds to set
 * @nsecs:	nanoseconds to set
 *
 * Return: The ktime_t representation of the value.
 */
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	if (unlikely(secs >= KTIME_SEC_MAX))
		return KTIME_MAX;

	return secs * NSEC_PER_SEC + (s64)nsecs;
}
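/*
 * Illustrative example (not part of the original header; the variable name
 * is hypothetical): building a ktime_t for 1 second and 500 milliseconds
 * from its components.
 *
 *	ktime_t timeout = ktime_set(1, 500 * NSEC_PER_MSEC);
 */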

/* Subtract two ktime_t variables. res = lhs - rhs: */
#define ktime_sub(lhs, rhs)	((lhs) - (rhs))

/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs)	((lhs) + (rhs))

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs)	((u64) (lhs) + (rhs))

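/*
 * Illustrative sketch of the overflow check a caller of ktime_add_unsafe()
 * is then responsible for (an assumption based on the comment above; the
 * variable names are hypothetical).  Most users can instead rely on the
 * ktime_add_safe() helper declared further down in this header:
 *
 *	ktime_t res = ktime_add_unsafe(lhs, rhs);
 *
 *	if (res < 0 || res < lhs || res < rhs)
 *		res = KTIME_MAX;
 */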
/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
 */
#define ktime_add_ns(kt, nsval)		((kt) + (nsval))

/*
 * Subtract a scalar nanosecond value from a ktime_t variable.
 * res = kt - nsval:
 */
#define ktime_sub_ns(kt, nsval)		((kt) - (nsval))

/* Convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}

/* Map the ktime_t to timespec64 conversion to the ns_to_timespec64() function */
#define ktime_to_timespec64(kt)		ns_to_timespec64((kt))

/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
	return kt;
}

/**
 * ktime_compare - Compares two ktime_t variables for less, greater or equal
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: ...
 *   cmp1  < cmp2: return <0
 *   cmp1 == cmp2: return 0
 *   cmp1  > cmp2: return >0
 */
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
	if (cmp1 < cmp2)
		return -1;
	if (cmp1 > cmp2)
		return 1;
	return 0;
}
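/*
 * Illustrative example (hypothetical variables t1 and t2): the result is
 * consumed like a memcmp()-style return value, e.g. to put two timestamps
 * into chronological order.
 *
 *	if (ktime_compare(t1, t2) > 0)
 *		swap(t1, t2);
 */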

/**
 * ktime_after - Compare if a ktime_t value is bigger than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened after cmp2.
 */
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) > 0;
}

/**
 * ktime_before - Compare if a ktime_t value is smaller than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened before cmp2.
 */
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) < 0;
}
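/*
 * Illustrative sketch of a deadline check built from the helpers above.
 * ktime_get() is declared in <linux/timekeeping.h>, which is included near
 * the end of this header; the deadline variable is hypothetical.
 *
 *	ktime_t deadline = ktime_add_ms(ktime_get(), 100);
 *	...
 *	if (ktime_after(ktime_get(), deadline))
 *		return -ETIMEDOUT;
 */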

#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * Negative divisors could cause an infinite loop,
	 * so bug out here.
	 */
	BUG_ON(div < 0);
	if (__builtin_constant_p(div) && !(div >> 32)) {
		s64 ns = kt;
		u64 tmp = ns < 0 ? -ns : ns;

		do_div(tmp, div);
		return ns < 0 ? -tmp : tmp;
	} else {
		return __ktime_divns(kt, div);
	}
}
#else /* BITS_PER_LONG < 64 */
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * The 32-bit implementation cannot handle negative divisors,
	 * so catch them on 64-bit as well.
	 */
	WARN_ON(div < 0);
	return kt / div;
}
#endif

/* Convert a ktime_t to microseconds */
static inline s64 ktime_to_us(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_USEC);
}

/* Convert a ktime_t to milliseconds */
static inline s64 ktime_to_ms(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_MSEC);
}

/* Microseconds elapsed between two ktime_t values: later - earlier */
static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_us(ktime_sub(later, earlier));
}

/* Milliseconds elapsed between two ktime_t values: later - earlier */
static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_ms(ktime_sub(later, earlier));
}
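/*
 * Illustrative example (do_work() and the variables are hypothetical):
 * measuring an elapsed interval in microseconds.  ktime_get() is declared
 * in <linux/timekeeping.h>, included near the end of this header.
 *
 *	ktime_t start = ktime_get();
 *
 *	do_work();
 *	pr_info("work took %lld us\n", ktime_us_delta(ktime_get(), start));
 */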

/* Add a scalar microsecond value to a ktime_t */
static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
	return ktime_add_ns(kt, usec * NSEC_PER_USEC);
}

/* Add a scalar millisecond value to a ktime_t */
static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
	return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
}

/* Subtract a scalar microsecond value from a ktime_t */
static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
	return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}

/* Subtract a scalar millisecond value from a ktime_t */
static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
{
	return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
}

/* Overflow-safe variant of ktime_add(): the result is clamped instead of wrapping */
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);

/**
 * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
 *			    format only if the variable contains data
 * @kt:		the ktime_t variable to convert
 * @ts:		the timespec64 variable to store the result in
 *
 * Return: %true if there was a successful conversion, %false if kt was 0.
 */
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
							 struct timespec64 *ts)
{
	if (kt) {
		*ts = ktime_to_timespec64(kt);
		return true;
	} else {
		return false;
	}
}
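/*
 * Illustrative example (last_event is hypothetical): only report a
 * timestamp when one was actually recorded, i.e. when the ktime_t is
 * non-zero.
 *
 *	struct timespec64 ts;
 *
 *	if (ktime_to_timespec64_cond(last_event, &ts))
 *		pr_info("last event at %lld.%09ld\n",
 *			(long long)ts.tv_sec, ts.tv_nsec);
 */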

#include <vdso/ktime.h>

/* Convert a scalar nanosecond value to ktime_t */
static inline ktime_t ns_to_ktime(u64 ns)
{
	return ns;
}

/* Convert a scalar millisecond value to ktime_t */
static inline ktime_t ms_to_ktime(u64 ms)
{
	return ms * NSEC_PER_MSEC;
}
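/*
 * Illustrative example (poll_interval_ms is hypothetical): the scalar
 * converters above are convenient for turning values expressed in
 * milliseconds, e.g. a module parameter, into ktime_t:
 *
 *	ktime_t period = ms_to_ktime(poll_interval_ms);
 */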

# include <linux/timekeeping.h>
# include <linux/timekeeping32.h>

#endif