1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
/* Set a bit in the USBCMD register, updating the cached copy as well */
static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command |= bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write: read back so the write reaches the HC now */
	ehci_readl(ehci, &ehci->regs->command);
}
28
29
/* Clear a bit in the USBCMD register, updating the cached copy as well */
static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command &= ~bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write: read back so the write reaches the HC now */
	ehci_readl(ehci, &ehci->regs->command);
}
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
/*
 * Delay (in ns) before each hrtimer event fires, indexed by event number.
 * Entries must stay in the same order as event_handlers[] below, since
 * ehci_enable_event() and ehci_hrtimer_func() index both tables with the
 * same event value.
 */
static unsigned event_delays_ns[] = {
	1 * NSEC_PER_MSEC,	/* poll async schedule status (ehci_poll_ASS) */
	1 * NSEC_PER_MSEC,	/* poll periodic schedule status (ehci_poll_PSS) */
	1 * NSEC_PER_MSEC,	/* poll for dead controller (ehci_handle_controller_death) */
	1125 * NSEC_PER_USEC,	/* wait for interrupt QH unlinks (ehci_handle_intr_unlinks) */
	2 * NSEC_PER_MSEC,	/* free cached iTDs/siTDs (end_free_itds) */
	6 * NSEC_PER_MSEC,	/* unlink empty async QHs (unlink_empty_async) */
	10 * NSEC_PER_MSEC,	/* IAA watchdog (ehci_iaa_watchdog) */
	10 * NSEC_PER_MSEC,	/* disable periodic schedule (ehci_disable_PSE) */
	15 * NSEC_PER_MSEC,	/* disable async schedule (ehci_disable_ASE) */
	100 * NSEC_PER_MSEC,	/* I/O watchdog (ehci_work) */
};
81
82
83static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
84 bool resched)
85{
86 ktime_t *timeout = &ehci->hr_timeouts[event];
87
88 if (resched)
89 *timeout = ktime_add(ktime_get(),
90 ktime_set(0, event_delays_ns[event]));
91 ehci->enabled_hrtimer_events |= (1 << event);
92
93
94 if (event < ehci->next_hrtimer_event) {
95 ehci->next_hrtimer_event = event;
96 hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
97 NSEC_PER_MSEC, HRTIMER_MODE_ABS);
98 }
99}
100
101
102
103static void ehci_poll_ASS(struct ehci_hcd *ehci)
104{
105 unsigned actual, want;
106
107
108 if (ehci->rh_state != EHCI_RH_RUNNING)
109 return;
110
111 want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
112 actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
113
114 if (want != actual) {
115
116
117 if (ehci->ASS_poll_count++ < 2) {
118 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
119 return;
120 }
121 ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
122 want, actual);
123 }
124 ehci->ASS_poll_count = 0;
125
126
127 if (want == 0) {
128 if (ehci->async_count > 0)
129 ehci_set_command_bit(ehci, CMD_ASE);
130
131 } else {
132 if (ehci->async_count == 0) {
133
134
135 ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
136 true);
137 }
138 }
139}
140
141
/* Turn off the async schedule after a brief delay */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_ASE);
}
146
147
148
149static void ehci_poll_PSS(struct ehci_hcd *ehci)
150{
151 unsigned actual, want;
152
153
154 if (ehci->rh_state != EHCI_RH_RUNNING)
155 return;
156
157 want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
158 actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
159
160 if (want != actual) {
161
162
163 if (ehci->PSS_poll_count++ < 2) {
164 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
165 return;
166 }
167 ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
168 want, actual);
169 }
170 ehci->PSS_poll_count = 0;
171
172
173 if (want == 0) {
174 if (ehci->periodic_count > 0)
175 ehci_set_command_bit(ehci, CMD_PSE);
176
177 } else {
178 if (ehci->periodic_count == 0) {
179
180
181 ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
182 true);
183 }
184 }
185}
186
187
/* Turn off the periodic schedule after a brief delay */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_PSE);
}
192
193
194
/*
 * The controller has died; finish shutting it down once it halts.
 * Polls STS_HALT a few times before giving up, then marks the root hub
 * halted, releases port ownership, disables interrupts, and cleans up
 * any outstanding work.  The teardown write order is deliberate.
 */
static void ehci_handle_controller_death(struct ehci_hcd *ehci)
{
	if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {

		/* Give the controller a little more time to halt */
		if (ehci->died_poll_count++ < 5) {
			/* Try again later */
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
			return;
		}
		ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
	}

	/* Clean up the mess */
	ehci->rh_state = EHCI_RH_HALTED;
	/* Release ownership of the ports (CONFIGFLAG = 0) */
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
	/* Mask all interrupts */
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	ehci_work(ehci);
	end_unlink_async(ehci);

	/* Not in process context, so couldn't call usb_hc_died() */
}
217
218
219
220static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
221{
222 bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
223
224
225
226
227
228
229
230
231 ehci->intr_unlinking = true;
232 while (!list_empty(&ehci->intr_unlink)) {
233 struct ehci_qh *qh;
234
235 qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
236 unlink_node);
237 if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
238 break;
239 list_del(&qh->unlink_node);
240 end_unlink_intr(ehci, qh);
241 }
242
243
244 if (!list_empty(&ehci->intr_unlink)) {
245 ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
246 ++ehci->intr_unlink_cycle;
247 }
248 ehci->intr_unlinking = false;
249}
250
251
252
253static void start_free_itds(struct ehci_hcd *ehci)
254{
255 if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
256 ehci->last_itd_to_free = list_entry(
257 ehci->cached_itd_list.prev,
258 struct ehci_itd, itd_list);
259 ehci->last_sitd_to_free = list_entry(
260 ehci->cached_sitd_list.prev,
261 struct ehci_sitd, sitd_list);
262 ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
263 }
264}
265
266
267static void end_free_itds(struct ehci_hcd *ehci)
268{
269 struct ehci_itd *itd, *n;
270 struct ehci_sitd *sitd, *sn;
271
272 if (ehci->rh_state < EHCI_RH_RUNNING) {
273 ehci->last_itd_to_free = NULL;
274 ehci->last_sitd_to_free = NULL;
275 }
276
277 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
278 list_del(&itd->itd_list);
279 dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
280 if (itd == ehci->last_itd_to_free)
281 break;
282 }
283 list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
284 list_del(&sitd->sitd_list);
285 dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
286 if (sitd == ehci->last_sitd_to_free)
287 break;
288 }
289
290 if (!list_empty(&ehci->cached_itd_list) ||
291 !list_empty(&ehci->cached_sitd_list))
292 start_free_itds(ehci);
293}
294
295
296
/*
 * Handle lost (or very late) IAA (Interrupt on Async Advance) interrupts.
 * Fires only while an IAA cycle is actually in progress on a running
 * controller; otherwise the event is stale and is ignored.
 */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
	u32 cmd, status;

	/*
	 * Nothing to do if the IAA completed normally (or the root hub is
	 * no longer running) before this watchdog fired.
	 */
	if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
		return;

	/* Snapshot USBCMD to see whether IAAD is still set */
	cmd = ehci_readl(ehci, &ehci->regs->command);

	/*
	 * If STS_IAA is set or the controller already cleared CMD_IAAD,
	 * the doorbell rang but the interrupt was lost (or is extremely
	 * late): count it and acknowledge the status bit ourselves.
	 * NOTE(review): presumably some controllers clear IAAD without
	 * raising the interrupt — confirm against the EHCI errata this
	 * driver targets.
	 */
	status = ehci_readl(ehci, &ehci->regs->status);
	if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
		COUNT(ehci->stats.lost_iaa);
		/* Write-1-to-clear the IAA status bit */
		ehci_writel(ehci, STS_IAA, &ehci->regs->status);
	}

	ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
	/* Finish the async unlink whether the IAA arrived or not */
	end_unlink_async(ehci);
}
334
335
336
337static void turn_on_io_watchdog(struct ehci_hcd *ehci)
338{
339
340 if (ehci->rh_state != EHCI_RH_RUNNING ||
341 (ehci->enabled_hrtimer_events &
342 BIT(EHCI_HRTIMER_IO_WATCHDOG)))
343 return;
344
345
346
347
348
349 if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
350 ehci->async_count + ehci->intr_count > 0))
351 ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
352}
353
354
355
356
357
358
359
/*
 * Handler for each hrtimer event, indexed by event number; must stay in
 * the same order as event_delays_ns[] above (both tables are indexed by
 * the same event value).
 */
static void (*event_handlers[])(struct ehci_hcd *) = {
	ehci_poll_ASS,			/* EHCI_HRTIMER_POLL_ASS */
	ehci_poll_PSS,			/* EHCI_HRTIMER_POLL_PSS */
	ehci_handle_controller_death,	/* EHCI_HRTIMER_POLL_DEAD */
	ehci_handle_intr_unlinks,	/* EHCI_HRTIMER_UNLINK_INTR */
	end_free_itds,			/* EHCI_HRTIMER_FREE_ITDS */
	unlink_empty_async,		/* async-unlink event (defined elsewhere) */
	ehci_iaa_watchdog,		/* IAA watchdog event (defined elsewhere) */
	ehci_disable_PSE,		/* EHCI_HRTIMER_DISABLE_PERIODIC */
	ehci_disable_ASE,		/* EHCI_HRTIMER_DISABLE_ASYNC */
	ehci_work,			/* EHCI_HRTIMER_IO_WATCHDOG */
};
372
373static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
374{
375 struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
376 ktime_t now;
377 unsigned long events;
378 unsigned long flags;
379 unsigned e;
380
381 spin_lock_irqsave(&ehci->lock, flags);
382
383 events = ehci->enabled_hrtimer_events;
384 ehci->enabled_hrtimer_events = 0;
385 ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
386
387
388
389
390
391 now = ktime_get();
392 for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
393 if (now.tv64 >= ehci->hr_timeouts[e].tv64)
394 event_handlers[e](ehci);
395 else
396 ehci_enable_event(ehci, e, false);
397 }
398
399 spin_unlock_irqrestore(&ehci->lock, flags);
400 return HRTIMER_NORESTART;
401}
402