/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 *   o get rid of clocksource_jiffies extern
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>

/* XXX - Would like a better way for initializing curr_clocksource */
extern struct clocksource clocksource_jiffies;

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource. Initialized to clocksource_jiffies.
 * next_clocksource:
 *	pending next selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_lock:
 *	protects manipulations to curr_clocksource and next_clocksource
 *	and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
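 * clocksource_override:
 *	clocksource matching the user-specified override_name, if any.
 * finished_booting:
 *	set at fs_initcall time; clocksource_get_next() defers switching
 *	to a newly selected clocksource until then.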
 */
static struct clocksource *curr_clocksource = &clocksource_jiffies;
static struct clocksource *next_clocksource;
static struct clocksource *clocksource_override;
static LIST_HEAD(clocksource_list);
static DEFINE_SPINLOCK(clocksource_lock);
static char override_name[32];
static int finished_booting;

/* clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;
	return 0;
}
fs_initcall(clocksource_done_booting);

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static unsigned long watchdog_resumed;

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
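/*
 * That is: the watchdog fires every HZ/2 jiffies (0.5s), and a watched
 * clocksource is considered unstable once it drifts from the watchdog
 * by more than NSEC_PER_SEC >> 4 = 62500000 ns (62.5ms) per interval.
 */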

static void clocksource_ratewd(struct clocksource *cs, int64_t delta)
{
	if (delta > -WATCHDOG_THRESHOLD && delta < WATCHDOG_THRESHOLD)
		return;

	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	clocksource_change_rating(cs, 0);
	list_del(&cs->wd_list);
}

static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs, *tmp;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int resumed;

	spin_lock(&watchdog_lock);

	resumed = test_and_clear_bit(0, &watchdog_resumed);

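	/*
	 * Read the watchdog and convert the cycles elapsed since the
	 * last run to nanoseconds; masking the subtraction handles a
	 * wrap of counters narrower than 64 bits.
	 */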
	wdnow = watchdog->read();
	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
	watchdog_last = wdnow;

	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		csnow = cs->read();

		if (unlikely(resumed)) {
			cs->wd_last = csnow;
			continue;
		}

		/* Initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
			    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
				cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
				/*
				 * We just marked the clocksource as
				 * highres-capable, notify the rest of the
				 * system as well so that we transition
				 * into high-res mode:
				 */
				tick_clock_notify();
			}
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
		} else {
			cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
			cs->wd_last = csnow;
			/* Check the delta. Might remove from the list ! */
			clocksource_ratewd(cs, cs_nsec - wd_nsec);
		}
	}

	if (!list_empty(&watchdog_list)) {
		/*
		 * Cycle through CPUs to check if the CPUs stay
		 * synchronized to each other.
		 */
		int next_cpu = cpumask_next(raw_smp_processor_id(),
					    cpu_online_mask);

		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(cpu_online_mask);
		watchdog_timer.expires += WATCHDOG_INTERVAL;
		add_timer_on(&watchdog_timer, next_cpu);
	}
	spin_unlock(&watchdog_lock);
}
static void clocksource_resume_watchdog(void)
{
	set_bit(0, &watchdog_resumed);
}

static void clocksource_check_watchdog(struct clocksource *cs)
{
	struct clocksource *cse;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		int started = !list_empty(&watchdog_list);

		list_add(&cs->wd_list, &watchdog_list);
		if (!started && watchdog) {
			watchdog_last = watchdog->read();
			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
			add_timer_on(&watchdog_timer,
				     cpumask_first(cpu_online_mask));
		}
	} else {
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

		if (!watchdog || cs->rating > watchdog->rating) {
			if (watchdog)
				del_timer(&watchdog_timer);
			watchdog = cs;
			init_timer(&watchdog_timer);
			watchdog_timer.function = clocksource_watchdog;

			/* Reset watchdog cycles */
			list_for_each_entry(cse, &watchdog_list, wd_list)
				cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
			/* Start if list is not empty */
			if (!list_empty(&watchdog_list)) {
				watchdog_last = watchdog->read();
				watchdog_timer.expires =
					jiffies + WATCHDOG_INTERVAL;
				add_timer_on(&watchdog_timer,
					     cpumask_first(cpu_online_mask));
			}
		}
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
#else
static void clocksource_check_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_resume_watchdog(void) { }
#endif

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);

	list_for_each_entry(cs, &clocksource_list, list) {
		if (cs->resume)
			cs->resume();
	}

	clocksource_resume_watchdog();

	spin_unlock_irqrestore(&clocksource_lock, flags);
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 *
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_get_next - Returns the selected clocksource
 *
 */
struct clocksource *clocksource_get_next(void)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	if (next_clocksource && finished_booting) {
		curr_clocksource = next_clocksource;
		next_clocksource = NULL;
	}
	spin_unlock_irqrestore(&clocksource_lock, flags);

	return curr_clocksource;
}

/**
 * select_clocksource - Selects the best registered clocksource.
 *
 * Private function. Must hold clocksource_lock when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which was selected by the userspace override.
 */
static struct clocksource *select_clocksource(void)
{
	struct clocksource *next;

	if (list_empty(&clocksource_list))
		return NULL;

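	/*
	 * clocksource_enqueue() keeps the list sorted by descending
	 * rating, so the first entry is the highest-rated clocksource
	 * unless a user override takes precedence.
	 */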
	if (clocksource_override)
		next = clocksource_override;
	else
		next = list_entry(clocksource_list.next, struct clocksource,
				  list);

	if (next == curr_clocksource)
		return NULL;

	return next;
}

/*
 * Enqueue the clocksource sorted by rating
 */
static int clocksource_enqueue(struct clocksource *c)
{
	struct list_head *tmp, *entry = &clocksource_list;

	list_for_each(tmp, &clocksource_list) {
		struct clocksource *cs;

		cs = list_entry(tmp, struct clocksource, list);
		if (cs == c)
			return -EBUSY;
		/* Keep track of the place where to insert */
		if (cs->rating >= c->rating)
			entry = tmp;
	}
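	/*
	 * list_add() inserts after entry, i.e. behind the last
	 * clocksource rated >= the new one, which keeps the list
	 * ordered by descending rating.
	 */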
	list_add(&c->list, entry);

	if (strlen(c->name) == strlen(override_name) &&
	    !strcmp(c->name, override_name))
		clocksource_override = c;

	return 0;
}

/**
 * clocksource_register - Used to install new clocksources
 * @c:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *c)
{
	unsigned long flags;
	int ret;

	/* save mult_orig on registration */
	c->mult_orig = c->mult;

	spin_lock_irqsave(&clocksource_lock, flags);
	ret = clocksource_enqueue(c);
	if (!ret)
		next_clocksource = select_clocksource();
	spin_unlock_irqrestore(&clocksource_lock, flags);
	if (!ret)
		clocksource_check_watchdog(c);
	return ret;
}
EXPORT_SYMBOL(clocksource_register);
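
/*
 * Typical driver-side usage (illustrative sketch only, not part of this
 * file; my_cs_read(), read_my_hw_counter() and my_hw_counter_hz are
 * hypothetical). Field names follow the struct clocksource layout used
 * by this kernel generation, where read() takes no argument:
 *
 *	static cycle_t my_cs_read(void)
 *	{
 *		return (cycle_t)read_my_hw_counter();
 *	}
 *
 *	static struct clocksource my_cs = {
 *		.name	= "my_cs",
 *		.rating	= 300,
 *		.read	= my_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	my_cs.mult = clocksource_hz2mult(my_hw_counter_hz, my_cs.shift);
 *	if (clocksource_register(&my_cs))
 *		printk(KERN_ERR "my_cs: registration failed\n");
 */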

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 *
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	next_clocksource = select_clocksource();
	spin_unlock_irqrestore(&clocksource_lock, flags);
}

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	list_del(&cs->list);
	if (clocksource_override == cs)
		clocksource_override = NULL;
	next_clocksource = select_clocksource();
	spin_unlock_irqrestore(&clocksource_lock, flags);
}

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with the current clocksource name
 *
 * Provides sysfs interface for showing the currently selected clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	spin_lock_irq(&clocksource_lock);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	spin_unlock_irq(&clocksource_lock);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	struct clocksource *ovr = NULL;
	size_t ret = count;
	int len;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	spin_lock_irq(&clocksource_lock);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;

	len = strlen(override_name);
	if (len) {
		struct clocksource *cs;

		ovr = clocksource_override;
		/* try to select it: */
		list_for_each_entry(cs, &clocksource_list, list) {
			if (strlen(cs->name) == len &&
			    !strcmp(cs->name, override_name))
				ovr = cs;
		}
	}

	/* Reselect when the override name has changed */
	if (ovr != clocksource_override) {
		clocksource_override = ovr;
		next_clocksource = select_clocksource();
	}

	spin_unlock_irq(&clocksource_lock);

	return ret;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksources
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources.
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	spin_lock_irq(&clocksource_lock);
	list_for_each_entry(src, &clocksource_list, list) {
		count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	spin_unlock_irq(&clocksource_lock);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};
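
/*
 * With the "clocksource" sysdev class and device id 0 above, both
 * attributes show up under /sys/devices/system/clocksource/clocksource0/,
 * so the current clocksource can be read and overridden from userspace,
 * e.g. (illustrative):
 *
 *	cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */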

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	unsigned long flags;
	spin_lock_irqsave(&clocksource_lock, flags);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	spin_unlock_irqrestore(&clocksource_lock, flags);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
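/* e.g. booting with "clocksource=acpi_pm" requests the ACPI PM timer by name */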

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
556