1 #include <linux/clocksource.h>
2 #include <linux/clockchips.h>
3 #include <linux/interrupt.h>
4 #include <linux/sysdev.h>
5 #include <linux/delay.h>
6 #include <linux/errno.h>
7 #include <linux/hpet.h>
8 #include <linux/init.h>
9 #include <linux/cpu.h>
10 #include <linux/pm.h>
11 #include <linux/io.h>
12 
13 #include <asm/fixmap.h>
14 #include <asm/i8253.h>
15 #include <asm/hpet.h>
16 
17 #define HPET_MASK			CLOCKSOURCE_MASK(32)
18 #define HPET_SHIFT			22
19 
20 /* FSEC = 10^-15
21    NSEC = 10^-9, so there are 10^6 femtoseconds per nanosecond */
22 #define FSEC_PER_NSEC			1000000L
23 
24 #define HPET_DEV_USED_BIT		2
25 #define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
26 #define HPET_DEV_VALID			0x8
27 #define HPET_DEV_FSB_CAP		0x1000
28 #define HPET_DEV_PERI_CAP		0x2000
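/*
 * Driver-internal flags kept in hpet_dev.flags: the *_CAP bits record
 * capabilities found while probing a comparator, VALID marks a timer that
 * successfully got an MSI irq assigned, and USED marks one that has been
 * claimed as a per-cpu clockevent.
 */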
29 
30 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
31 
32 /*
33  * HPET address is set in acpi/boot.c, when an ACPI entry exists
34  */
35 unsigned long				hpet_address;
36 #ifdef CONFIG_PCI_MSI
37 static unsigned long			hpet_num_timers;
38 #endif
39 static void __iomem			*hpet_virt_address;
40 
41 struct hpet_dev {
42 	struct clock_event_device	evt;
43 	unsigned int			num;
44 	int				cpu;
45 	unsigned int			irq;
46 	unsigned int			flags;
47 	char				name[10];
48 };
49 
50 unsigned long hpet_readl(unsigned long a)
51 {
52 	return readl(hpet_virt_address + a);
53 }
54 
55 static inline void hpet_writel(unsigned long d, unsigned long a)
56 {
57 	writel(d, hpet_virt_address + a);
58 }
59 
60 #ifdef CONFIG_X86_64
61 #include <asm/pgtable.h>
62 #endif
63 
64 static inline void hpet_set_mapping(void)
65 {
66 	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
67 #ifdef CONFIG_X86_64
68 	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
69 #endif
70 }
71 
72 static inline void hpet_clear_mapping(void)
73 {
74 	iounmap(hpet_virt_address);
75 	hpet_virt_address = NULL;
76 }
77 
78 /*
79  * HPET command line enable / disable
80  */
81 static int boot_hpet_disable;
82 int hpet_force_user;
83 
84 static int __init hpet_setup(char *str)
85 {
86 	if (str) {
87 		if (!strncmp("disable", str, 7))
88 			boot_hpet_disable = 1;
89 		if (!strncmp("force", str, 5))
90 			hpet_force_user = 1;
91 	}
92 	return 1;
93 }
94 __setup("hpet=", hpet_setup);
95 
96 static int __init disable_hpet(char *str)
97 {
98 	boot_hpet_disable = 1;
99 	return 1;
100 }
101 __setup("nohpet", disable_hpet);
102 
103 static inline int is_hpet_capable(void)
104 {
105 	return !boot_hpet_disable && hpet_address;
106 }
107 
108 /*
109  * HPET timer interrupt enable / disable
110  */
111 static int hpet_legacy_int_enabled;
112 
113 /**
114  * is_hpet_enabled - check whether the hpet timer interrupt is enabled
115  */
116 int is_hpet_enabled(void)
117 {
118 	return is_hpet_capable() && hpet_legacy_int_enabled;
119 }
120 EXPORT_SYMBOL_GPL(is_hpet_enabled);
121 
122 /*
123  * When the hpet driver (/dev/hpet) is enabled, we need to reserve
124  * timer 0, and also timer 1 when RTC emulation is in use.
125  */
126 #ifdef CONFIG_HPET
127 
128 static void hpet_reserve_msi_timers(struct hpet_data *hd);
129 
130 static void hpet_reserve_platform_timers(unsigned long id)
131 {
132 	struct hpet __iomem *hpet = hpet_virt_address;
133 	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
134 	unsigned int nrtimers, i;
135 	struct hpet_data hd;
136 
137 	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
138 
139 	memset(&hd, 0, sizeof(hd));
140 	hd.hd_phys_address	= hpet_address;
141 	hd.hd_address		= hpet;
142 	hd.hd_nirqs		= nrtimers;
143 	hpet_reserve_timer(&hd, 0);
144 
145 #ifdef CONFIG_HPET_EMULATE_RTC
146 	hpet_reserve_timer(&hd, 1);
147 #endif
148 
149 	/*
150 	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
151 	 * is wrong for i8259!), not the output IRQ.  Many BIOS writers
152 	 * don't bother configuring *any* comparator interrupts.
153 	 */
154 	hd.hd_irq[0] = HPET_LEGACY_8254;
155 	hd.hd_irq[1] = HPET_LEGACY_RTC;
156 
157 	for (i = 2; i < nrtimers; timer++, i++) {
158 		hd.hd_irq[i] = (readl(&timer->hpet_config) &
159 			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
160 	}
161 
162 	hpet_reserve_msi_timers(&hd);
163 
164 	hpet_alloc(&hd);
165 
166 }
167 #else
168 static void hpet_reserve_platform_timers(unsigned long id) { }
169 #endif
170 
171 /*
172  * Common hpet info
173  */
174 static unsigned long hpet_period;
175 
176 static void hpet_legacy_set_mode(enum clock_event_mode mode,
177 			  struct clock_event_device *evt);
178 static int hpet_legacy_next_event(unsigned long delta,
179 			   struct clock_event_device *evt);
180 
181 /*
182  * The hpet clock event device
183  */
184 static struct clock_event_device hpet_clockevent = {
185 	.name		= "hpet",
186 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
187 	.set_mode	= hpet_legacy_set_mode,
188 	.set_next_event = hpet_legacy_next_event,
189 	.shift		= 32,
190 	.irq		= 0,
191 	.rating		= 50,
192 };
193 
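/*
 * Per the HPET specification the main counter may only be written while
 * the overall enable bit is clear, so the counter is halted, both 32 bit
 * halves are zeroed and only then is counting re-enabled.
 */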
194 static void hpet_start_counter(void)
195 {
196 	unsigned long cfg = hpet_readl(HPET_CFG);
197 
198 	cfg &= ~HPET_CFG_ENABLE;
199 	hpet_writel(cfg, HPET_CFG);
200 	hpet_writel(0, HPET_COUNTER);
201 	hpet_writel(0, HPET_COUNTER + 4);
202 	cfg |= HPET_CFG_ENABLE;
203 	hpet_writel(cfg, HPET_CFG);
204 }
205 
206 static void hpet_resume_device(void)
207 {
208 	force_hpet_resume();
209 }
210 
211 static void hpet_restart_counter(void)
212 {
213 	hpet_resume_device();
214 	hpet_start_counter();
215 }
216 
217 static void hpet_enable_legacy_int(void)
218 {
219 	unsigned long cfg = hpet_readl(HPET_CFG);
220 
221 	cfg |= HPET_CFG_LEGACY;
222 	hpet_writel(cfg, HPET_CFG);
223 	hpet_legacy_int_enabled = 1;
224 }
225 
226 static void hpet_legacy_clockevent_register(void)
227 {
228 	/* Start HPET legacy interrupts */
229 	hpet_enable_legacy_int();
230 
231 	/*
232 	 * The mult factor is defined as (include/linux/clockchips.h)
233 	 *  mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h)
234 	 * hpet_period is in units of femtoseconds (per cycle), so
235 	 *  mult/2^shift = cyc/ns = 10^6/hpet_period
236 	 *  mult = (10^6 * 2^shift)/hpet_period
237 	 *  mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period
238 	 */
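	/*
	 * For example, assuming a common 14.31818 MHz HPET, hpet_period is
	 * roughly 69841279 fs and mult works out to about
	 * (10^6 << 32) / 69841279, i.e. on the order of 6.1 * 10^7.
	 */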
239 	hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC,
240 				      hpet_period, hpet_clockevent.shift);
241 	/* Calculate the min / max delta */
242 	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
243 							   &hpet_clockevent);
244 	/* 5 usec minimum reprogramming delta. */
245 	hpet_clockevent.min_delta_ns = 5000;
246 
247 	/*
248 	 * Start hpet with the boot cpu mask and make it
249 	 * global after the IO_APIC has been initialized.
250 	 */
251 	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
252 	clockevents_register_device(&hpet_clockevent);
253 	global_clock_event = &hpet_clockevent;
254 	printk(KERN_DEBUG "hpet clockevent registered\n");
255 }
256 
257 static int hpet_setup_msi_irq(unsigned int irq);
258 
259 static void hpet_set_mode(enum clock_event_mode mode,
260 			  struct clock_event_device *evt, int timer)
261 {
262 	unsigned long cfg, cmp, now;
263 	uint64_t delta;
264 
265 	switch (mode) {
266 	case CLOCK_EVT_MODE_PERIODIC:
267 		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
268 		delta >>= evt->shift;
269 		now = hpet_readl(HPET_COUNTER);
270 		cmp = now + (unsigned long) delta;
271 		cfg = hpet_readl(HPET_Tn_CFG(timer));
272 		/* Make sure we use edge triggered interrupts */
273 		cfg &= ~HPET_TN_LEVEL;
274 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
275 		       HPET_TN_SETVAL | HPET_TN_32BIT;
276 		hpet_writel(cfg, HPET_Tn_CFG(timer));
277 		/*
278 		 * The first write after writing TN_SETVAL to the
279 		 * config register sets the counter value, the second
280 		 * write sets the period.
281 		 */
282 		hpet_writel(cmp, HPET_Tn_CMP(timer));
283 		udelay(1);
284 		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
285 		break;
286 
287 	case CLOCK_EVT_MODE_ONESHOT:
288 		cfg = hpet_readl(HPET_Tn_CFG(timer));
289 		cfg &= ~HPET_TN_PERIODIC;
290 		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
291 		hpet_writel(cfg, HPET_Tn_CFG(timer));
292 		break;
293 
294 	case CLOCK_EVT_MODE_UNUSED:
295 	case CLOCK_EVT_MODE_SHUTDOWN:
296 		cfg = hpet_readl(HPET_Tn_CFG(timer));
297 		cfg &= ~HPET_TN_ENABLE;
298 		hpet_writel(cfg, HPET_Tn_CFG(timer));
299 		break;
300 
301 	case CLOCK_EVT_MODE_RESUME:
302 		if (timer == 0) {
303 			hpet_enable_legacy_int();
304 		} else {
305 			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
306 			hpet_setup_msi_irq(hdev->irq);
307 			disable_irq(hdev->irq);
308 			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
309 			enable_irq(hdev->irq);
310 		}
311 		break;
312 	}
313 }
314 
315 static int hpet_next_event(unsigned long delta,
316 			   struct clock_event_device *evt, int timer)
317 {
318 	u32 cnt;
319 
320 	cnt = hpet_readl(HPET_COUNTER);
321 	cnt += (u32) delta;
322 	hpet_writel(cnt, HPET_Tn_CMP(timer));
323 
324 	/*
325 	 * We need to read back the CMP register to make sure that
326 	 * what we wrote hit the chip before we compare it to the
327 	 * counter.
328 	 */
329 	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
330 
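	/*
	 * If the counter has already passed the comparator we just wrote
	 * (possible for very small deltas), report -ETIME so that the
	 * caller can retry with a larger delta.
	 */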
331 	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
332 }
333 
334 static void hpet_legacy_set_mode(enum clock_event_mode mode,
335 			struct clock_event_device *evt)
336 {
337 	hpet_set_mode(mode, evt, 0);
338 }
339 
340 static int hpet_legacy_next_event(unsigned long delta,
341 			struct clock_event_device *evt)
342 {
343 	return hpet_next_event(delta, evt, 0);
344 }
345 
346 /*
347  * HPET MSI Support
348  */
349 #ifdef CONFIG_PCI_MSI
350 
351 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
352 static struct hpet_dev	*hpet_devs;
353 
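/*
 * Masking and unmasking of the per-comparator MSI is done by toggling the
 * comparator's FSB interrupt enable bit (HPET_TN_FSB): with the bit clear
 * no FSB/MSI message is generated.
 */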
354 void hpet_msi_unmask(unsigned int irq)
355 {
356 	struct hpet_dev *hdev = get_irq_data(irq);
357 	unsigned long cfg;
358 
359 	/* unmask it */
360 	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
361 	cfg |= HPET_TN_FSB;
362 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
363 }
364 
365 void hpet_msi_mask(unsigned int irq)
366 {
367 	unsigned long cfg;
368 	struct hpet_dev *hdev = get_irq_data(irq);
369 
370 	/* mask it */
371 	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
372 	cfg &= ~HPET_TN_FSB;
373 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
374 }
375 
376 void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
377 {
378 	struct hpet_dev *hdev = get_irq_data(irq);
379 
380 	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
381 	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
382 }
383 
384 void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
385 {
386 	struct hpet_dev *hdev = get_irq_data(irq);
387 
388 	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
389 	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
390 	msg->address_hi = 0;
391 }
392 
393 static void hpet_msi_set_mode(enum clock_event_mode mode,
394 				struct clock_event_device *evt)
395 {
396 	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
397 	hpet_set_mode(mode, evt, hdev->num);
398 }
399 
400 static int hpet_msi_next_event(unsigned long delta,
401 				struct clock_event_device *evt)
402 {
403 	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
404 	return hpet_next_event(delta, evt, hdev->num);
405 }
406 
407 static int hpet_setup_msi_irq(unsigned int irq)
408 {
409 	if (arch_setup_hpet_msi(irq)) {
410 		destroy_irq(irq);
411 		return -EINVAL;
412 	}
413 	return 0;
414 }
415 
416 static int hpet_assign_irq(struct hpet_dev *dev)
417 {
418 	unsigned int irq;
419 
420 	irq = create_irq();
421 	if (!irq)
422 		return -EINVAL;
423 
424 	set_irq_data(irq, dev);
425 
426 	if (hpet_setup_msi_irq(irq))
427 		return -EINVAL;
428 
429 	dev->irq = irq;
430 	return 0;
431 }
432 
433 static irqreturn_t hpet_interrupt_handler(int irq, void *data)
434 {
435 	struct hpet_dev *dev = (struct hpet_dev *)data;
436 	struct clock_event_device *hevt = &dev->evt;
437 
438 	if (!hevt->event_handler) {
439 		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
440 				dev->num);
441 		return IRQ_HANDLED;
442 	}
443 
444 	hevt->event_handler(hevt);
445 	return IRQ_HANDLED;
446 }
447 
448 static int hpet_setup_irq(struct hpet_dev *dev)
449 {
450 
451 	if (request_irq(dev->irq, hpet_interrupt_handler,
452 			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
453 		return -1;
454 
455 	disable_irq(dev->irq);
456 	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
457 	enable_irq(dev->irq);
458 
459 	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
460 			 dev->name, dev->irq);
461 
462 	return 0;
463 }
464 
465 /* This must be called on the target @cpu */
466 static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
467 {
468 	struct clock_event_device *evt = &hdev->evt;
469 	uint64_t hpet_freq;
470 
471 	WARN_ON(cpu != smp_processor_id());
472 	if (!(hdev->flags & HPET_DEV_VALID))
473 		return;
474 
475 	if (hpet_setup_msi_irq(hdev->irq))
476 		return;
477 
478 	hdev->cpu = cpu;
479 	per_cpu(cpu_hpet_dev, cpu) = hdev;
480 	evt->name = hdev->name;
481 	hpet_setup_irq(hdev);
482 	evt->irq = hdev->irq;
483 
484 	evt->rating = 110;
485 	evt->features = CLOCK_EVT_FEAT_ONESHOT;
486 	if (hdev->flags & HPET_DEV_PERI_CAP)
487 		evt->features |= CLOCK_EVT_FEAT_PERIODIC;
488 
489 	evt->set_mode = hpet_msi_set_mode;
490 	evt->set_next_event = hpet_msi_next_event;
491 	evt->shift = 32;
492 
493 	/*
494 	 * The period is a femtoseconds value. We need to calculate the
495 	 * scaled math multiplication factor for nanosecond to hpet tick
496 	 * conversion.
497 	 */
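	/*
	 * 10^15 fs/sec divided by the period in fs/cycle gives the HPET
	 * frequency in Hz; div_sc() then yields mult such that
	 * cycles = (ns * mult) >> shift.
	 */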
498 	hpet_freq = 1000000000000000ULL;
499 	do_div(hpet_freq, hpet_period);
500 	evt->mult = div_sc((unsigned long) hpet_freq,
501 				      NSEC_PER_SEC, evt->shift);
502 	/* Calculate the max delta */
503 	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
504 	/* 5 usec minimum reprogramming delta. */
505 	evt->min_delta_ns = 5000;
506 
507 	evt->cpumask = cpumask_of(hdev->cpu);
508 	clockevents_register_device(evt);
509 }
510 
511 #ifdef CONFIG_HPET
512 /* Reserve at least one timer for userspace (/dev/hpet) */
513 #define RESERVE_TIMERS 1
514 #else
515 #define RESERVE_TIMERS 0
516 #endif
517 
518 static void hpet_msi_capability_lookup(unsigned int start_timer)
519 {
520 	unsigned int id;
521 	unsigned int num_timers;
522 	unsigned int num_timers_used = 0;
523 	int i;
524 
525 	id = hpet_readl(HPET_ID);
526 
527 	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
528 	num_timers++; /* Value read out starts from 0 */
529 
530 	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
531 	if (!hpet_devs)
532 		return;
533 
534 	hpet_num_timers = num_timers;
535 
536 	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
537 		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
538 		unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));
539 
540 		/* Only consider HPET timers with MSI (FSB) support */
541 		if (!(cfg & HPET_TN_FSB_CAP))
542 			continue;
543 
544 		hdev->flags = 0;
545 		if (cfg & HPET_TN_PERIODIC_CAP)
546 			hdev->flags |= HPET_DEV_PERI_CAP;
547 		hdev->num = i;
548 
549 		sprintf(hdev->name, "hpet%d", i);
550 		if (hpet_assign_irq(hdev))
551 			continue;
552 
553 		hdev->flags |= HPET_DEV_FSB_CAP;
554 		hdev->flags |= HPET_DEV_VALID;
555 		num_timers_used++;
556 		if (num_timers_used == num_possible_cpus())
557 			break;
558 	}
559 
560 	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
561 		num_timers, num_timers_used);
562 }
563 
564 #ifdef CONFIG_HPET
565 static void hpet_reserve_msi_timers(struct hpet_data *hd)
566 {
567 	int i;
568 
569 	if (!hpet_devs)
570 		return;
571 
572 	for (i = 0; i < hpet_num_timers; i++) {
573 		struct hpet_dev *hdev = &hpet_devs[i];
574 
575 		if (!(hdev->flags & HPET_DEV_VALID))
576 			continue;
577 
578 		hd->hd_irq[hdev->num] = hdev->irq;
579 		hpet_reserve_timer(hd, hdev->num);
580 	}
581 }
582 #endif
583 
584 static struct hpet_dev *hpet_get_unused_timer(void)
585 {
586 	int i;
587 
588 	if (!hpet_devs)
589 		return NULL;
590 
591 	for (i = 0; i < hpet_num_timers; i++) {
592 		struct hpet_dev *hdev = &hpet_devs[i];
593 
594 		if (!(hdev->flags & HPET_DEV_VALID))
595 			continue;
596 		if (test_and_set_bit(HPET_DEV_USED_BIT,
597 			(unsigned long *)&hdev->flags))
598 			continue;
599 		return hdev;
600 	}
601 	return NULL;
602 }
603 
604 struct hpet_work_struct {
605 	struct delayed_work work;
606 	struct completion complete;
607 };
608 
609 static void hpet_work(struct work_struct *w)
610 {
611 	struct hpet_dev *hdev;
612 	int cpu = smp_processor_id();
613 	struct hpet_work_struct *hpet_work;
614 
615 	hpet_work = container_of(w, struct hpet_work_struct, work.work);
616 
617 	hdev = hpet_get_unused_timer();
618 	if (hdev)
619 		init_one_hpet_msi_clockevent(hdev, cpu);
620 
621 	complete(&hpet_work->complete);
622 }
623 
624 static int hpet_cpuhp_notify(struct notifier_block *n,
625 		unsigned long action, void *hcpu)
626 {
627 	unsigned long cpu = (unsigned long)hcpu;
628 	struct hpet_work_struct work;
629 	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
630 
631 	switch (action & 0xf) {
632 	case CPU_ONLINE:
633 		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
634 		init_completion(&work.complete);
635 		/* FIXME: add schedule_work_on() */
636 		schedule_delayed_work_on(cpu, &work.work, 0);
637 		wait_for_completion(&work.complete);
638 		destroy_timer_on_stack(&work.work.timer);
639 		break;
640 	case CPU_DEAD:
641 		if (hdev) {
642 			free_irq(hdev->irq, hdev);
643 			hdev->flags &= ~HPET_DEV_USED;
644 			per_cpu(cpu_hpet_dev, cpu) = NULL;
645 		}
646 		break;
647 	}
648 	return NOTIFY_OK;
649 }
650 #else
651 
652 static int hpet_setup_msi_irq(unsigned int irq)
653 {
654 	return 0;
655 }
656 static void hpet_msi_capability_lookup(unsigned int start_timer)
657 {
658 	return;
659 }
660 
661 #ifdef CONFIG_HPET
662 static void hpet_reserve_msi_timers(struct hpet_data *hd)
663 {
664 	return;
665 }
666 #endif
667 
668 static int hpet_cpuhp_notify(struct notifier_block *n,
669 		unsigned long action, void *hcpu)
670 {
671 	return NOTIFY_OK;
672 }
673 
674 #endif
675 
676 /*
677  * Clock source related code
678  */
679 static cycle_t read_hpet(void)
680 {
681 	return (cycle_t)hpet_readl(HPET_COUNTER);
682 }
683 
684 #ifdef CONFIG_X86_64
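/* 0xf0 is the offset of the HPET main counter (HPET_COUNTER) register. */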
685 static cycle_t __vsyscall_fn vread_hpet(void)
686 {
687 	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
688 }
689 #endif
690 
691 static struct clocksource clocksource_hpet = {
692 	.name		= "hpet",
693 	.rating		= 250,
694 	.read		= read_hpet,
695 	.mask		= HPET_MASK,
696 	.shift		= HPET_SHIFT,
697 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
698 	.resume		= hpet_restart_counter,
699 #ifdef CONFIG_X86_64
700 	.vread		= vread_hpet,
701 #endif
702 };
703 
704 static int hpet_clocksource_register(void)
705 {
706 	u64 start, now;
707 	cycle_t t1;
708 
709 	/* Start the counter */
710 	hpet_start_counter();
711 
712 	/* Verify whether hpet counter works */
713 	t1 = read_hpet();
714 	rdtscll(start);
715 
716 	/*
717 	 * We don't know the TSC frequency yet, but waiting for
718 	 * 200000 TSC cycles is safe:
719 	 * 4 GHz == 50us
720 	 * 1 GHz == 200us
721 	 */
722 	do {
723 		rep_nop();
724 		rdtscll(now);
725 	} while ((now - start) < 200000UL);
726 
727 	if (t1 == read_hpet()) {
728 		printk(KERN_WARNING
729 		       "HPET counter not counting. HPET disabled\n");
730 		return -ENODEV;
731 	}
732 
733 	/*
734 	 * The definition of mult is (include/linux/clocksource.h)
735 	 * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc
736 	 * so we first need to convert hpet_period to ns/cyc units:
737 	 *  mult/2^shift = ns/cyc = hpet_period/10^6
738 	 *  mult = (hpet_period * 2^shift)/10^6
739 	 *  mult = (hpet_period << shift)/FSEC_PER_NSEC
740 	 */
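	/*
	 * For example, assuming a 14.31818 MHz HPET (hpet_period of roughly
	 * 69841279 fs), this gives mult ~= (69841279 << 22) / 10^6, i.e.
	 * about 2.9 * 10^8.
	 */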
741 	clocksource_hpet.mult = div_sc(hpet_period, FSEC_PER_NSEC, HPET_SHIFT);
742 
743 	clocksource_register(&clocksource_hpet);
744 
745 	return 0;
746 }
747 
748 /**
749  * hpet_enable - Try to set up the HPET timer. Returns 1 on success.
750  */
751 int __init hpet_enable(void)
752 {
753 	unsigned long id;
754 	int i;
755 
756 	if (!is_hpet_capable())
757 		return 0;
758 
759 	hpet_set_mapping();
760 
761 	/*
762 	 * Read the period and check for a sane value:
763 	 */
764 	hpet_period = hpet_readl(HPET_PERIOD);
765 
766 	/*
767 	 * AMD SB700 based systems with spread spectrum enabled use a
768 	 * SMM based HPET emulation to provide proper frequency
769 	 * setting. The SMM code is initialized with the first HPET
770 	 * register access and takes some time to complete. During
771 	 * this time the config register reads 0xffffffff. We check
772 	 * for at most 1000 loops whether the config register reads a
773 	 * non-0xffffffff value to make sure that HPET is up and running
774 	 * before we go further. A counting loop is safe, as the HPET
775 	 * access takes thousands of CPU cycles. On non-SB700 based
776 	 * machines this check is only done once and has no side
777 	 * effects.
778 	 */
779 	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
780 		if (i == 1000) {
781 			printk(KERN_WARNING
782 			       "HPET config register value = 0xFFFFFFFF. "
783 			       "Disabling HPET\n");
784 			goto out_nohpet;
785 		}
786 	}
787 
788 	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
789 		goto out_nohpet;
790 
791 	/*
792 	 * Read the HPET ID register to retrieve the IRQ routing
793 	 * information and the number of channels
794 	 */
795 	id = hpet_readl(HPET_ID);
796 
797 #ifdef CONFIG_HPET_EMULATE_RTC
798 	/*
799 	 * The legacy routing mode needs at least two channels, tick timer
800 	 * and the rtc emulation channel.
801 	 */
802 	if (!(id & HPET_ID_NUMBER))
803 		goto out_nohpet;
804 #endif
805 
806 	if (hpet_clocksource_register())
807 		goto out_nohpet;
808 
809 	if (id & HPET_ID_LEGSUP) {
810 		hpet_legacy_clockevent_register();
811 		hpet_msi_capability_lookup(2);
812 		return 1;
813 	}
814 	hpet_msi_capability_lookup(0);
815 	return 0;
816 
817 out_nohpet:
818 	hpet_clear_mapping();
819 	hpet_address = 0;
820 	return 0;
821 }
822 
823 /*
824  * Needs to be late, as the reserve_timer code calls kmalloc!
825  *
826  * Not a problem on i386, as hpet_enable is called from late_time_init,
827  * but on x86_64 it is necessary!
828  */
829 static __init int hpet_late_init(void)
830 {
831 	int cpu;
832 
833 	if (boot_hpet_disable)
834 		return -ENODEV;
835 
836 	if (!hpet_address) {
837 		if (!force_hpet_address)
838 			return -ENODEV;
839 
840 		hpet_address = force_hpet_address;
841 		hpet_enable();
842 	}
843 
844 	if (!hpet_virt_address)
845 		return -ENODEV;
846 
847 	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
848 
849 	for_each_online_cpu(cpu) {
850 		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
851 	}
852 
853 	/* This notifier should be called after workqueue is ready */
854 	hotcpu_notifier(hpet_cpuhp_notify, -20);
855 
856 	return 0;
857 }
858 fs_initcall(hpet_late_init);
859 
860 void hpet_disable(void)
861 {
862 	if (is_hpet_capable()) {
863 		unsigned long cfg = hpet_readl(HPET_CFG);
864 
865 		if (hpet_legacy_int_enabled) {
866 			cfg &= ~HPET_CFG_LEGACY;
867 			hpet_legacy_int_enabled = 0;
868 		}
869 		cfg &= ~HPET_CFG_ENABLE;
870 		hpet_writel(cfg, HPET_CFG);
871 	}
872 }
873 
874 #ifdef CONFIG_HPET_EMULATE_RTC
875 
876 /* HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
877  * is enabled, we support RTC interrupt functionality in software.
878  * RTC has 3 kinds of interrupts:
879  * 1) Update Interrupt - generates an interrupt every second, when the RTC
880  *    clock is updated
881  * 2) Alarm Interrupt - generates an interrupt at a specific time of day
882  * 3) Periodic Interrupt - generates periodic interrupts with frequencies of
883  *    2Hz-8192Hz (2Hz-64Hz for non-root users) (all freqs in powers of 2)
884  * (1) and (2) above are implemented using polling at a frequency of
885  * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
886  * overhead (DEFAULT_RTC_INT_FREQ).
887  * For (3), we use interrupts at 64 Hz or the user-specified periodic
888  * frequency, whichever is higher.
889  */
890 #include <linux/mc146818rtc.h>
891 #include <linux/rtc.h>
892 #include <asm/rtc.h>
893 
894 #define DEFAULT_RTC_INT_FREQ	64
895 #define DEFAULT_RTC_SHIFT	6
896 #define RTC_NUM_INTS		1
897 
898 static unsigned long hpet_rtc_flags;
899 static int hpet_prev_update_sec;
900 static struct rtc_time hpet_alarm_time;
901 static unsigned long hpet_pie_count;
902 static u32 hpet_t1_cmp;
903 static unsigned long hpet_default_delta;
904 static unsigned long hpet_pie_delta;
905 static unsigned long hpet_pie_limit;
906 
907 static rtc_irq_handler irq_handler;
908 
909 /*
910  * Check whether hpet counter c1 is ahead of c2
911  */
912 static inline int hpet_cnt_ahead(u32 c1, u32 c2)
913 {
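	/*
	 * The difference is evaluated in 32 bits and interpreted as signed,
	 * so the comparison stays correct across main counter wraparound.
	 */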
914 	return (s32)(c2 - c1) < 0;
915 }
916 
917 /*
918  * Registers an IRQ handler.
919  */
920 int hpet_register_irq_handler(rtc_irq_handler handler)
921 {
922 	if (!is_hpet_enabled())
923 		return -ENODEV;
924 	if (irq_handler)
925 		return -EBUSY;
926 
927 	irq_handler = handler;
928 
929 	return 0;
930 }
931 EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
932 
933 /*
934  * Deregisters the IRQ handler registered with hpet_register_irq_handler()
935  * and does cleanup.
936  */
937 void hpet_unregister_irq_handler(rtc_irq_handler handler)
938 {
939 	if (!is_hpet_enabled())
940 		return;
941 
942 	irq_handler = NULL;
943 	hpet_rtc_flags = 0;
944 }
945 EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
946 
947 /*
948  * Timer 1 for RTC emulation. We use one-shot mode, as periodic mode
949  * is not supported by all HPET implementations for timer 1.
950  *
951  * hpet_rtc_timer_init() is called when the rtc is initialized.
952  */
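/*
 * hpet_default_delta below is one RTC polling interval in HPET cycles:
 * mult * NSEC_PER_SEC >> shift is one second worth of cycles, and the
 * additional DEFAULT_RTC_SHIFT divides that by 64 (DEFAULT_RTC_INT_FREQ).
 */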
953 int hpet_rtc_timer_init(void)
954 {
955 	unsigned long cfg, cnt, delta, flags;
956 
957 	if (!is_hpet_enabled())
958 		return 0;
959 
960 	if (!hpet_default_delta) {
961 		uint64_t clc;
962 
963 		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
964 		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
965 		hpet_default_delta = (unsigned long) clc;
966 	}
967 
968 	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
969 		delta = hpet_default_delta;
970 	else
971 		delta = hpet_pie_delta;
972 
973 	local_irq_save(flags);
974 
975 	cnt = delta + hpet_readl(HPET_COUNTER);
976 	hpet_writel(cnt, HPET_T1_CMP);
977 	hpet_t1_cmp = cnt;
978 
979 	cfg = hpet_readl(HPET_T1_CFG);
980 	cfg &= ~HPET_TN_PERIODIC;
981 	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
982 	hpet_writel(cfg, HPET_T1_CFG);
983 
984 	local_irq_restore(flags);
985 
986 	return 1;
987 }
988 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
989 
990 /*
991  * The functions below are called from the rtc driver.
992  * Return 0 if HPET is not being used.
993  * Otherwise do the necessary changes and return 1.
994  */
995 int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
996 {
997 	if (!is_hpet_enabled())
998 		return 0;
999 
1000 	hpet_rtc_flags &= ~bit_mask;
1001 	return 1;
1002 }
1003 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
1004 
1005 int hpet_set_rtc_irq_bit(unsigned long bit_mask)
1006 {
1007 	unsigned long oldbits = hpet_rtc_flags;
1008 
1009 	if (!is_hpet_enabled())
1010 		return 0;
1011 
1012 	hpet_rtc_flags |= bit_mask;
1013 
1014 	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
1015 		hpet_prev_update_sec = -1;
1016 
1017 	if (!oldbits)
1018 		hpet_rtc_timer_init();
1019 
1020 	return 1;
1021 }
1022 EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
1023 
1024 int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
1025 			unsigned char sec)
1026 {
1027 	if (!is_hpet_enabled())
1028 		return 0;
1029 
1030 	hpet_alarm_time.tm_hour = hrs;
1031 	hpet_alarm_time.tm_min = min;
1032 	hpet_alarm_time.tm_sec = sec;
1033 
1034 	return 1;
1035 }
1036 EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
1037 
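/*
 * For rates at or below 64 Hz the 64 Hz poll is divided down via
 * hpet_pie_limit; for higher rates hpet_pie_delta holds the comparator
 * increment (in HPET cycles) for the requested frequency.
 */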
1038 int hpet_set_periodic_freq(unsigned long freq)
1039 {
1040 	uint64_t clc;
1041 
1042 	if (!is_hpet_enabled())
1043 		return 0;
1044 
1045 	if (freq <= DEFAULT_RTC_INT_FREQ)
1046 		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
1047 	else {
1048 		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
1049 		do_div(clc, freq);
1050 		clc >>= hpet_clockevent.shift;
1051 		hpet_pie_delta = (unsigned long) clc;
1052 	}
1053 	return 1;
1054 }
1055 EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
1056 
1057 int hpet_rtc_dropped_irq(void)
1058 {
1059 	return is_hpet_enabled();
1060 }
1061 EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
1062 
1063 static void hpet_rtc_timer_reinit(void)
1064 {
1065 	unsigned long cfg, delta;
1066 	int lost_ints = -1;
1067 
1068 	if (unlikely(!hpet_rtc_flags)) {
1069 		cfg = hpet_readl(HPET_T1_CFG);
1070 		cfg &= ~HPET_TN_ENABLE;
1071 		hpet_writel(cfg, HPET_T1_CFG);
1072 		return;
1073 	}
1074 
1075 	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1076 		delta = hpet_default_delta;
1077 	else
1078 		delta = hpet_pie_delta;
1079 
1080 	/*
1081 	 * Increment the comparator value until we are ahead of the
1082 	 * current count.
1083 	 */
1084 	do {
1085 		hpet_t1_cmp += delta;
1086 		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
1087 		lost_ints++;
1088 	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
1089 
1090 	if (lost_ints) {
1091 		if (hpet_rtc_flags & RTC_PIE)
1092 			hpet_pie_count += lost_ints;
1093 		if (printk_ratelimit())
1094 			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
1095 				lost_ints);
1096 	}
1097 }
1098 
1099 irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
1100 {
1101 	struct rtc_time curr_time;
1102 	unsigned long rtc_int_flag = 0;
1103 
1104 	hpet_rtc_timer_reinit();
1105 	memset(&curr_time, 0, sizeof(struct rtc_time));
1106 
1107 	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
1108 		get_rtc_time(&curr_time);
1109 
1110 	if (hpet_rtc_flags & RTC_UIE &&
1111 	    curr_time.tm_sec != hpet_prev_update_sec) {
1112 		if (hpet_prev_update_sec >= 0)
1113 			rtc_int_flag = RTC_UF;
1114 		hpet_prev_update_sec = curr_time.tm_sec;
1115 	}
1116 
1117 	if (hpet_rtc_flags & RTC_PIE &&
1118 	    ++hpet_pie_count >= hpet_pie_limit) {
1119 		rtc_int_flag |= RTC_PF;
1120 		hpet_pie_count = 0;
1121 	}
1122 
1123 	if (hpet_rtc_flags & RTC_AIE &&
1124 	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
1125 	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
1126 	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
1127 			rtc_int_flag |= RTC_AF;
1128 
1129 	if (rtc_int_flag) {
1130 		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
1131 		if (irq_handler)
1132 			irq_handler(rtc_int_flag, dev_id);
1133 	}
1134 	return IRQ_HANDLED;
1135 }
1136 EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
1137 #endif
1138