// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012 by Alan Stern
 */

/* This file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/* Set a bit in the USBCMD register */
static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command |= bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write */
	ehci_readl(ehci, &ehci->regs->command);
}

/* Clear a bit in the USBCMD register */
static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command &= ~bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write */
	ehci_readl(ehci, &ehci->regs->command);
}
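
/*
 * Note: MMIO writes may be posted (buffered) by the bus.  Reading back
 * from the same register region forces the pending write out to the
 * controller before we proceed, which is why each helper above ends
 * with an ehci_readl() of the register it just wrote.
 */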

/*-------------------------------------------------------------------------*/

/*
 * EHCI timer support...  Now using hrtimers.
 *
 * Lots of different events are triggered from ehci->hrtimer.  Whenever
 * the timer routine runs, it checks each possible event; events that are
 * currently enabled and whose expiration time has passed get handled.
 * The set of enabled events is stored as a collection of bitflags in
 * ehci->enabled_hrtimer_events, and they are numbered in order of
 * increasing delay values (ranging between 1 ms and 100 ms).
 *
 * Rather than implementing a sorted list or tree of all pending events,
 * we keep track only of the lowest-numbered pending event, in
 * ehci->next_hrtimer_event.  Whenever ehci->hrtimer gets restarted, its
 * expiration time is set to the timeout value for this event.
 *
 * As a result, events might not get handled right away; the actual delay
 * could be anywhere up to twice the requested delay.  This doesn't
 * matter, because none of the events are especially time-critical.  The
 * ones that matter most all have a delay of 1 ms, so they will be
 * handled after 2 ms at most, which is okay.  In addition to this, we
 * allow for an expiration range of 1 ms.
 */
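
/*
 * Worked example (illustrative): suppose EHCI_HRTIMER_POLL_ASS (1 ms)
 * and EHCI_HRTIMER_IO_WATCHDOG (100 ms) are both pending.  POLL_ASS is
 * the lower-numbered event, so ehci->next_hrtimer_event refers to it
 * and the hrtimer fires after about 1 ms.  The timer routine handles
 * POLL_ASS, sees that IO_WATCHDOG has not expired yet, and re-enables
 * it without recomputing its deadline, so the watchdog still goes off
 * around its original 100 ms mark.
 */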

/*
 * Delay lengths for the hrtimer event types.
 * Keep this list sorted by delay length, in the same order as
 * the event types indexed by enum ehci_hrtimer_event in ehci.h.
 */
static unsigned event_delays_ns[] = {
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_ASS */
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_PSS */
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_DEAD */
	1125 * NSEC_PER_USEC,	/* EHCI_HRTIMER_UNLINK_INTR */
	2 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_FREE_ITDS */
	2 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_ACTIVE_UNLINK */
	5 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_START_UNLINK_INTR */
	6 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_ASYNC_UNLINKS */
	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IAA_WATCHDOG */
	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_PERIODIC */
	15 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_ASYNC */
	100 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IO_WATCHDOG */
};

/* Enable a pending hrtimer event */
static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
		bool resched)
{
	ktime_t		*timeout = &ehci->hr_timeouts[event];

	if (resched)
		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
	ehci->enabled_hrtimer_events |= (1 << event);

	/* Track only the lowest-numbered pending event */
	if (event < ehci->next_hrtimer_event) {
		ehci->next_hrtimer_event = event;
		hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
				NSEC_PER_MSEC, HRTIMER_MODE_ABS);
	}
}
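
/*
 * Typical call pattern (illustrative): callers run with ehci->lock held
 * and pass resched == true so that a fresh deadline is computed:
 *
 *	ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
 *
 * The timer routine itself passes resched == false when re-enabling an
 * event that has not expired yet, so the event keeps its old deadline.
 */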


/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
static void ehci_poll_ASS(struct ehci_hcd *ehci)
{
	unsigned	actual, want;

	/* Don't enable anything if the controller isn't running (e.g., died) */
	if (ehci->rh_state != EHCI_RH_RUNNING)
		return;

	want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
	actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;

	if (want != actual) {

		/* Poll again later, but give up after about 2-4 ms */
		if (ehci->ASS_poll_count++ < 2) {
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
			return;
		}
		ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
				want, actual);
	}
	ehci->ASS_poll_count = 0;

	/* The status is up-to-date; restart or stop the schedule as needed */
	if (want == 0) {	/* Stopped */
		if (ehci->async_count > 0)
			ehci_set_command_bit(ehci, CMD_ASE);

	} else {		/* Running */
		if (ehci->async_count == 0) {

			/* Turn off the schedule after a while */
			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
					true);
		}
	}
}
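
/*
 * Why poll at all?  The EHCI specification says software should not
 * change a schedule-enable bit in USBCMD unless the corresponding
 * status bit in USBSTS already agrees with it, so we wait for STS_ASS
 * to catch up with CMD_ASE before toggling the schedule again.
 */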

/* Turn off the async schedule after a brief delay */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_ASE);
}


/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
static void ehci_poll_PSS(struct ehci_hcd *ehci)
{
	unsigned	actual, want;

	/* Don't do anything if the controller isn't running (e.g., died) */
	if (ehci->rh_state != EHCI_RH_RUNNING)
		return;

	want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
	actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;

	if (want != actual) {

		/* Poll again later, but give up after about 2-4 ms */
		if (ehci->PSS_poll_count++ < 2) {
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
			return;
		}
		ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
				want, actual);
	}
	ehci->PSS_poll_count = 0;

	/* The status is up-to-date; restart or stop the schedule as needed */
	if (want == 0) {	/* Stopped */
		if (ehci->periodic_count > 0)
			ehci_set_command_bit(ehci, CMD_PSE);

	} else {		/* Running */
		if (ehci->periodic_count == 0) {

			/* Turn off the schedule after a while */
			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
					true);
		}
	}
}

/* Turn off the periodic schedule after a brief delay */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_PSE);
}


/* Poll the STS_HALT status bit; see when a dead controller stops */
static void ehci_handle_controller_death(struct ehci_hcd *ehci)
{
	if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {

		/* Give up after a few milliseconds */
		if (ehci->died_poll_count++ < 5) {
			/* Try again later */
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
			return;
		}
		ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
	}

	/* Clean up the mess */
	ehci->rh_state = EHCI_RH_HALTED;
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	ehci_work(ehci);
	end_unlink_async(ehci);

	/* Not in process context, so don't try to reset the controller */
}

/* Start to unlink interrupt QHs */
static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
{
	bool		stopped = (ehci->rh_state < EHCI_RH_RUNNING);

	/*
	 * Process all the QHs on the intr_unlink_wait list that were added
	 * before the current unlink cycle began.  The list is in
	 * temporal order, so stop when we reach the first entry in the
	 * current cycle.  But if the root hub isn't running then
	 * process all the QHs on the list.
	 */
	while (!list_empty(&ehci->intr_unlink_wait)) {
		struct ehci_qh	*qh;

		qh = list_first_entry(&ehci->intr_unlink_wait,
				struct ehci_qh, unlink_node);
		if (!stopped && (qh->unlink_cycle ==
				ehci->intr_unlink_wait_cycle))
			break;
		list_del_init(&qh->unlink_node);
		qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
		start_unlink_intr(ehci, qh);
	}

	/* Handle remaining entries later */
	if (!list_empty(&ehci->intr_unlink_wait)) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}

/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
{
	bool		stopped = (ehci->rh_state < EHCI_RH_RUNNING);

	/*
	 * Process all the QHs on the intr_unlink list that were added
	 * before the current unlink cycle began.  The list is in
	 * temporal order, so stop when we reach the first entry in the
	 * current cycle.  But if the root hub isn't running then
	 * process all the QHs on the list.
	 */
	ehci->intr_unlinking = true;
	while (!list_empty(&ehci->intr_unlink)) {
		struct ehci_qh	*qh;

		qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
				unlink_node);
		if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
			break;
		list_del_init(&qh->unlink_node);
		end_unlink_intr(ehci, qh);
	}

	/* Handle remaining entries later */
	if (!list_empty(&ehci->intr_unlink)) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
	ehci->intr_unlinking = false;
}
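
/*
 * How the cycle counters work (illustrative): each QH added to one of
 * the unlink lists is stamped with that list's current cycle number.
 * The loops above handle only entries stamped with an older cycle; any
 * entry stamped with the current cycle has not yet waited out a full
 * timer period, so the counter is bumped and the event re-armed.  This
 * guarantees every QH sits on its list for at least one full delay.
 */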


/* Start another free-iTDs/siTDs cycle */
static void start_free_itds(struct ehci_hcd *ehci)
{
	if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
		ehci->last_itd_to_free = list_entry(
				ehci->cached_itd_list.prev,
				struct ehci_itd, itd_list);
		ehci->last_sitd_to_free = list_entry(
				ehci->cached_sitd_list.prev,
				struct ehci_sitd, sitd_list);
		ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
	}
}

/* Wait for controller to stop using old iTDs and siTDs */
static void end_free_itds(struct ehci_hcd *ehci)
{
	struct ehci_itd		*itd, *n;
	struct ehci_sitd	*sitd, *sn;

	if (ehci->rh_state < EHCI_RH_RUNNING) {
		ehci->last_itd_to_free = NULL;
		ehci->last_sitd_to_free = NULL;
	}

	list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
		list_del(&itd->itd_list);
		dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
		if (itd == ehci->last_itd_to_free)
			break;
	}
	list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
		list_del(&sitd->sitd_list);
		dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
		if (sitd == ehci->last_sitd_to_free)
			break;
	}

	if (!list_empty(&ehci->cached_itd_list) ||
			!list_empty(&ehci->cached_sitd_list))
		start_free_itds(ehci);
}
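
/*
 * Why defer the freeing (illustrative): the controller may still be
 * referencing a completed iTD/siTD, so descriptors are parked on the
 * cached lists and returned to their DMA pools only after the
 * EHCI_HRTIMER_FREE_ITDS delay, by which time the hardware should be
 * done with them.  Entries added during the wait are freed on the next
 * cycle, which is why end_free_itds() may restart the process.
 */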


/* Handle lost (or very late) IAA interrupts */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
	u32 cmd, status;

	/*
	 * Lost IAA irqs wedge things badly; seen first with a vt8235.
	 * So we need this watchdog, but must protect it against both
	 * (a) SMP races against real IAA firing and retriggering, and
	 * (b) clean HC shutdown, when IAA watchdog was pending.
	 */
	if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
		return;

	/* If we get here, IAA is *REALLY* late.  It's barely
	 * conceivable that the system is so busy that CMD_IAAD
	 * is still legitimately set, so let's be sure it's
	 * clear before we read STS_IAA.  (The HC should clear
	 * CMD_IAAD when it sets STS_IAA.)
	 */
	cmd = ehci_readl(ehci, &ehci->regs->command);

	/*
	 * If IAA is set here it either legitimately triggered
	 * after the watchdog timer expired (_way_ late, so we'll
	 * still count it as lost) ... or a silicon erratum:
	 * - VIA seems to set IAA without triggering the IRQ;
	 * - IAAD potentially cleared without setting IAA.
	 */
	status = ehci_readl(ehci, &ehci->regs->status);
	if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
		COUNT(ehci->stats.lost_iaa);
		ehci_writel(ehci, STS_IAA, &ehci->regs->status);
	}

	ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
	end_iaa_cycle(ehci);
}


/* Enable the I/O watchdog, if appropriate */
static void turn_on_io_watchdog(struct ehci_hcd *ehci)
{
	/* Not needed if the controller isn't running or it's already enabled */
	if (ehci->rh_state != EHCI_RH_RUNNING ||
			(ehci->enabled_hrtimer_events &
				BIT(EHCI_HRTIMER_IO_WATCHDOG)))
		return;

	/*
	 * Isochronous transfers always need the watchdog.
	 * For other sorts we use it only if the flag is set.
	 */
	if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
			ehci->async_count + ehci->intr_count > 0))
		ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
}


/*
 * Handler functions for the hrtimer event types.
 * Keep this array in the same order as the event types indexed by
 * enum ehci_hrtimer_event in ehci.h.
 */
static void (*event_handlers[])(struct ehci_hcd *) = {
	ehci_poll_ASS,			/* EHCI_HRTIMER_POLL_ASS */
	ehci_poll_PSS,			/* EHCI_HRTIMER_POLL_PSS */
	ehci_handle_controller_death,	/* EHCI_HRTIMER_POLL_DEAD */
	ehci_handle_intr_unlinks,	/* EHCI_HRTIMER_UNLINK_INTR */
	end_free_itds,			/* EHCI_HRTIMER_FREE_ITDS */
	end_unlink_async,		/* EHCI_HRTIMER_ACTIVE_UNLINK */
	ehci_handle_start_intr_unlinks,	/* EHCI_HRTIMER_START_UNLINK_INTR */
	unlink_empty_async,		/* EHCI_HRTIMER_ASYNC_UNLINKS */
	ehci_iaa_watchdog,		/* EHCI_HRTIMER_IAA_WATCHDOG */
	ehci_disable_PSE,		/* EHCI_HRTIMER_DISABLE_PERIODIC */
	ehci_disable_ASE,		/* EHCI_HRTIMER_DISABLE_ASYNC */
	ehci_work,			/* EHCI_HRTIMER_IO_WATCHDOG */
};

static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
{
	struct ehci_hcd	*ehci = container_of(t, struct ehci_hcd, hrtimer);
	ktime_t		now;
	unsigned long	events;
	unsigned long	flags;
	unsigned	e;

	spin_lock_irqsave(&ehci->lock, flags);

	events = ehci->enabled_hrtimer_events;
	ehci->enabled_hrtimer_events = 0;
	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;

	/*
	 * Check each pending event.  If its time has expired, handle
	 * the event; otherwise re-enable it.
	 */
	now = ktime_get();
	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
		if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
			event_handlers[e](ehci);
		else
			ehci_enable_event(ehci, e, false);
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return HRTIMER_NORESTART;
}
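
/*
 * Dispatch note (illustrative): the routine snapshots the enabled-event
 * mask and clears the bookkeeping before running any handlers.  Events
 * that a handler enables during the loop are therefore not dispatched
 * in this invocation; ehci_enable_event() sees next_hrtimer_event ==
 * EHCI_HRTIMER_NO_EVENT and re-arms the hrtimer for them, so nothing is
 * lost.  Returning HRTIMER_NORESTART is safe because any restart that
 * was needed has already been done by ehci_enable_event().
 */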