/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPU's
 *	we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
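/*
 * delay_loop() burns "loops" iterations of a dec/jnz spin.  The two
 * ".align 16" jump targets are the "jump magic" mentioned in the header
 * comment: they keep the hot loop on a fixed alignment so its timing
 * does not vary with where the function happens to be placed.
 */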
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}

/* TSC based delay: */
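/*
 * Spin until the CPU's time-stamp counter has advanced by at least
 * "loops" ticks.  Preemption is re-enabled briefly on every pass so
 * that RT tasks are not starved while we busy-wait.
 */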
static void delay_tsc(unsigned long loops)
{
	unsigned long bclock, now;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	rdtscl(bclock);
	for (;;) {
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}

/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}

int __devinit read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		rdtscll(*timer_val);
		return 0;
	}
	return -1;
}

void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

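/*
 * __const_udelay() takes the delay as a fraction of a second in
 * 2^-32 second units (see the scaling constants in __udelay() and
 * __ndelay() below).  The "mull" multiplies that by the number of
 * delay loops per second, split as (xloops * 4) * (loops_per_jiffy *
 * (HZ/4)) to keep the second operand small, and %edx receives the
 * high 32 bits of the product, i.e. the loop count.  The final
 * increment rounds up so that we never delay for less than requested.
 */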
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);

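/*
 * 0x000010c7 is 2^32 / 10^6 rounded up (4294.97 -> 4295) and 0x5 is
 * 2^32 / 10^9 rounded up (4.29 -> 5): they convert microseconds and
 * nanoseconds into the 2^-32 second units that __const_udelay() expects.
 */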
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);