1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#undef ISP1362_DEBUG
41
42
43
44
45
46
47
48
49
50#undef BUGGY_PXA2XX_UDC_USBTEST
51
52#undef PTD_TRACE
53#undef URB_TRACE
54#undef VERBOSE
55#undef REGISTERS
56
57
58
59
60#undef CHIP_BUFFER_TEST
61
62#include <linux/module.h>
63#include <linux/moduleparam.h>
64#include <linux/kernel.h>
65#include <linux/delay.h>
66#include <linux/ioport.h>
67#include <linux/sched.h>
68#include <linux/slab.h>
69#include <linux/errno.h>
70#include <linux/list.h>
71#include <linux/interrupt.h>
72#include <linux/usb.h>
73#include <linux/usb/isp1362.h>
74#include <linux/usb/hcd.h>
75#include <linux/platform_device.h>
76#include <linux/pm.h>
77#include <linux/io.h>
78#include <linux/bitmap.h>
79#include <linux/prefetch.h>
80#include <linux/debugfs.h>
81#include <linux/seq_file.h>
82
83#include <asm/irq.h>
84#include <asm/byteorder.h>
85#include <asm/unaligned.h>
86
/*
 * Debug verbosity for this driver's DBG()/URB_DBG() macros.  Only
 * runtime-writable via sysfs (mode 0644) when built with ISP1362_DEBUG;
 * otherwise the parameter is load-time only (mode 0, no sysfs entry).
 */
static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
#endif
93
94#include "../core/usb.h"
95#include "isp1362.h"
96
97
98#define DRIVER_VERSION "2005-04-04"
99#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
100
101MODULE_DESCRIPTION(DRIVER_DESC);
102MODULE_LICENSE("GPL");
103
104static const char hcd_name[] = "isp1362-hcd";
105
106static void isp1362_hc_stop(struct usb_hcd *hcd);
107static int isp1362_hc_start(struct usb_hcd *hcd);
108
109
110
111
112
113
114
115
116
117
/*
 * Enable additional microprocessor interrupt sources in the cached
 * HCuPINTENB mask.  Bits in @mask that are not yet enabled are first
 * acknowledged in HCuPINT so a stale latched status cannot fire the
 * moment they are unmasked.  The hardware register is only written when
 * no IRQ is currently being serviced; the IRQ handler restores
 * HCuPINTENB from the cached mask itself before returning.
 */
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	/* Nothing to do if every requested bit is already enabled */
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	/* Clear pending status for bits about to be newly enabled */
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	/* IRQ handler writes the mask back on exit; don't race it */
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}
129
130
131
132static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
133 u16 offset)
134{
135 struct isp1362_ep_queue *epq = NULL;
136
137 if (offset < isp1362_hcd->istl_queue[1].buf_start)
138 epq = &isp1362_hcd->istl_queue[0];
139 else if (offset < isp1362_hcd->intl_queue.buf_start)
140 epq = &isp1362_hcd->istl_queue[1];
141 else if (offset < isp1362_hcd->atl_queue.buf_start)
142 epq = &isp1362_hcd->intl_queue;
143 else if (offset < isp1362_hcd->atl_queue.buf_start +
144 isp1362_hcd->atl_queue.buf_size)
145 epq = &isp1362_hcd->atl_queue;
146
147 if (epq)
148 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
149 else
150 pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
151
152 return epq;
153}
154
155static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
156{
157 int offset;
158
159 if (index * epq->blk_size > epq->buf_size) {
160 pr_warn("%s: Bad %s index %d(%d)\n",
161 __func__, epq->name, index,
162 epq->buf_size / epq->blk_size);
163 return -EINVAL;
164 }
165 offset = epq->buf_start + index * epq->blk_size;
166 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
167
168 return offset;
169}
170
171
172
/*
 * Work out how many bytes of a @size byte request can go into one PTD
 * of @epq.  The chunk is capped at MAX_XFER_SIZE and at the payload
 * room left in the queue (free blocks minus one PTD header).  If the
 * request must be split, the chunk is rounded down to a multiple of
 * the endpoint's max packet size @mps so the device never sees a short
 * packet mid-transfer; only the final chunk may end short.
 */
static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
				    int mps)
{
	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);

	/* NOTE(review): assumes epq->buf_avail > 0 here; callers appear
	 * to check free space first — confirm */
	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
	if (xfer_size < size && xfer_size % mps)
		xfer_size -= xfer_size % mps;

	return xfer_size;
}
184
/*
 * Reserve a contiguous run of PTD blocks in @epq big enough for a PTD
 * header plus @len payload bytes, on behalf of @ep.  On success the
 * endpoint's ptd_index/ptd_offset/num_ptds are set and the blocks are
 * marked in the queue's allocation bitmap.
 *
 * Returns the first claimed block index, -ENOMEM when the queue has no
 * free blocks at all, or -EOVERFLOW when no sufficiently large
 * contiguous run is currently available.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* one block for the remainder plus full blocks for header+payload */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* An endpoint must never hold two claims at once */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
			epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
223
/*
 * Return @ep's claimed PTD blocks to @epq: clear them in the allocation
 * bitmap and set them in the skip map so the controller stops
 * processing the now-stale PTD.  Resets the endpoint's claim
 * bookkeeping to its "unclaimed" state.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		    epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* Invalidate so a double release / stale claim is detectable */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
253
254
255
256
257
258
/*
 * Fill in ep->ptd (plus ep->data and ep->length) for the next
 * transaction of @urb, according to the endpoint's nextpid state.
 * @fno is the frame number used for the starting-frame field of
 * isochronous PTDs.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still outstanding */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			/* NOTE(review): unlike the IN case this always uses
			 * frame descriptor 0 and leaves ep->data at the
			 * actual_length offset — confirm intended */
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP stage: fixed 8-byte request from setup_packet,
		 * always DATA0 */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* Control status stage: zero-length, DATA1, direction
		 * opposite to the (optional) data stage */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
343
344static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
345 struct isp1362_ep_queue *epq)
346{
347 struct ptd *ptd = &ep->ptd;
348 int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
349
350 prefetch(ptd);
351 isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
352 if (len)
353 isp1362_write_buffer(isp1362_hcd, ep->data,
354 ep->ptd_offset + PTD_HEADER_SIZE, len);
355
356 dump_ptd(ptd);
357 dump_ptd_out_data(ptd, ep->data);
358}
359
/*
 * Read a completed PTD header back from chip buffer memory and, for IN
 * transfers that received data, copy the payload (PTD count field
 * bytes) into ep->data.  Also removes @ep from the queue's active list.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* Only IN transfers that actually received data carry a payload */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);

	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
391
392
393
394
395
396
/*
 * Take an in-flight PTD out of service: queue @ep on the HCD's remove
 * list (drained from the next SOF interrupt via finish_unlinks()) and
 * set the PTD's skip bit so the controller stops executing it.  ISO
 * endpoints have no PTD index/skip bit and are only queued for removal.
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);

	/* SOF interrupt drives finish_unlinks() */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues carry no skip map; nothing more to do */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* Every PTD skipped: deactivate the whole buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
439
440
441
442
443
/*
 * Complete @urb with @status and hand it back to the USB core,
 * dropping the HCD spinlock around the giveback callback (hence the
 * __releases/__acquires annotations).  If the endpoint has no more
 * queued URBs it is removed from its schedule list and, for periodic
 * endpoints, its bandwidth allocation on the branch is released.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* A control endpoint always restarts with a SETUP stage */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
	       ep->num_req, usb_pipedevice(urb->pipe),
	       usb_pipeendpoint(urb->pipe),
	       !usb_pipein(urb->pipe) ? "out" : "in",
	       usb_pipecontrol(urb->pipe) ? "ctrl" :
	       usb_pipeint(urb->pipe) ? "int" :
	       usb_pipebulk(urb->pipe) ? "bulk" :
	       "iso",
	       urb->actual_length, urb->transfer_buffer_length,
	       !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
	       "short_ok" : "", urb->status);

	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	/* Giveback may resubmit; it must run without our lock held */
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* More URBs queued on this endpoint: keep it scheduled */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* Async endpoints just drop off their schedule list */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}

	/* Periodic endpoint: give back the bandwidth it claimed */
	if (ep->interval) {
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
494
495
496
497
/*
 * Examine the completion code of @ep's just-read-back PTD and advance
 * the URB state machine: accumulate actual_length, update the data
 * toggle, move a control transfer to its next stage, and complete the
 * URB once it is finished or has failed permanently (three consecutive
 * errors, STALL, or data overrun).
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	if (cc == PTD_NOTACCESSED) {
		/* Controller never touched this PTD; treat the device as
		 * not responding */
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun (short packet) is only an error when the URB set
	 * URB_SHORT_NOT_OK; otherwise it just ends the transfer early. */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			/* Keep what was transferred, then fail the URB;
			 * a control transfer still runs its status stage */
			urb->actual_length += PTD_GET_COUNT(ptd);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* Fail hard on STALL/overrun or after three retries;
		 * otherwise leave the URB in progress for a retry */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* Done, unless a trailing zero-length
				 * packet was requested and is still due */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* Buffer full or short packet: the transfer is complete */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* Choose the next control stage: straight to status if
		 * there is no data stage, else OUT/IN data with DATA1 */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		/* Status stage done: the control transfer is complete */
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
651
/*
 * Drain the HCD's remove list (runs from the SOF interrupt): for every
 * endpoint queued by remove_ptd(), release its PTD buffer blocks,
 * complete any still-queued URB with -ESHUTDOWN, and unlink the
 * endpoint from the active and remove lists.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			/* ISO endpoints (index < 0) claim no blocks */
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
685
/*
 * Kick the ATL queue.  With @count > 0: program HCATLDTC when fewer
 * PTDs than the buffer holds were submitted, unmask the ATL done
 * interrupt, write the current skip map and activate the ATL buffer.
 * With @count == 0 (submission deferred) only the SOF interrupt is
 * enabled so another attempt happens next frame.
 */
static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
{
	if (count > 0) {
		if (count < isp1362_hcd->atl_queue.ptd_count)
			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else
		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
}
697
/*
 * Activate the INTL (interrupt transfer) buffer: unmask its done
 * interrupt, mark it active and write the current skip map so the
 * controller processes all unskipped INTL PTDs.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
704
/*
 * Hand one half of the ISO ping-pong buffer to the controller: unmask
 * the matching done interrupt and mark the buffer full so it gets
 * transmitted.  @flip selects ISTL1 (non-zero) or ISTL0.
 */
static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
{
	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
}
711
/*
 * Build and submit one PTD for the next transaction of @urb on @ep:
 * prepare the PTD, claim buffer blocks in @epq, copy the PTD to chip
 * memory and clear its skip bit.  Returns 0 on success, -ENOMEM when
 * the queue has no free blocks, or -EOVERFLOW when no contiguous run
 * large enough is available.
 */
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	int index = epq->free_ptd;

	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);
	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);
	isp1362_write_ptd(isp1362_hcd, ep, epq);
	/* Unskip last: the PTD only becomes live once its bit clears */
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}
740
/*
 * Walk the async schedule and submit a PTD for every endpoint that is
 * not already active.  -ENOMEM aborts the walk, -EOVERFLOW skips just
 * that endpoint; both defer the rest to the next SOF.  The schedule
 * list is rotated afterwards so all async endpoints get a fair share
 * of the ATL buffer.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* Don't race the completion path working on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Rotate the schedule so the next endpoint goes first next time */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
793
/*
 * Walk the periodic schedule and submit a PTD for every interrupt
 * endpoint that is not already active, then activate the INTL buffer
 * if anything was queued.  -ENOMEM aborts the walk; -EOVERFLOW skips
 * just that endpoint.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* Don't race the completion path working on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
839
840static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
841{
842 u16 ptd_offset = ep->ptd_offset;
843 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
844
845 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
846 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
847
848 ptd_offset += num_ptds * epq->blk_size;
849 if (ptd_offset < epq->buf_start + epq->buf_size)
850 return ptd_offset;
851 else
852 return -ENOMEM;
853}
854
/*
 * Fill the currently writable half of the ISO ping-pong buffer with
 * PTDs for all isochronous URBs due in the next frame and hand it to
 * the controller.  URBs whose window has already passed are completed
 * with -EOVERFLOW; URBs further in the future are left alone.  When
 * the other half is also free, it is filled for the following frame
 * as well (second pass via the fill2 label).
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	/* Don't race the completion path working on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	if (!list_empty(&epq->active))
		return;

	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		/* frames between now and the URB's scheduled start */
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one */
		} else if (diff == -1) {
			/* submit PTD for the upcoming frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				    __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warn("%s: req %d No more %s PTD buffers available\n",
					__func__, ep->num_req, epq->name);
				break;
			}
		}
	}
	/* Flush the prepared PTDs, flagging the last one for the chip */
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* If the other ping-pong half is idle, fill it for the frame
	 * after this one */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
935
/*
 * Post-process every PTD of @epq flagged in @done_map: read the PTD
 * back from chip memory, release its buffer blocks and let
 * postproc_ep() advance or complete the owning URB.  The finishing
 * counter keeps the start_* submission paths off the queue meanwhile.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* Stop early once every flagged PTD has been handled */
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warn("%s: done_map not clear: %08lx:%08lx\n",
			__func__, done_map, epq->skip_map);
	atomic_dec(&epq->finishing);
}
981
/*
 * Post-process all PTDs of a completed ISO ping-pong buffer half:
 * read each one back and complete/advance its URB.  ISO queues claim
 * no fixed-size blocks, so there is nothing to release; the finishing
 * counter keeps the submission path off the queue meanwhile.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* ISO queues are expected to use blk_size == 0 (variable PTDs) */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1005
1006static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1007{
1008 int handled = 0;
1009 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1010 u16 irqstat;
1011 u16 svc_mask;
1012
1013 spin_lock(&isp1362_hcd->lock);
1014
1015 BUG_ON(isp1362_hcd->irq_active++);
1016
1017 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1018
1019 irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1020 DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1021
1022
1023 irqstat &= isp1362_hcd->irqenb;
1024 isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1025 svc_mask = irqstat;
1026
1027 if (irqstat & HCuPINT_SOF) {
1028 isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1029 isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1030 handled = 1;
1031 svc_mask &= ~HCuPINT_SOF;
1032 DBG(3, "%s: SOF\n", __func__);
1033 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1034 if (!list_empty(&isp1362_hcd->remove_list))
1035 finish_unlinks(isp1362_hcd);
1036 if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1037 if (list_empty(&isp1362_hcd->atl_queue.active)) {
1038 start_atl_transfers(isp1362_hcd);
1039 } else {
1040 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1041 isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1042 isp1362_hcd->atl_queue.skip_map);
1043 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1044 }
1045 }
1046 }
1047
1048 if (irqstat & HCuPINT_ISTL0) {
1049 isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1050 handled = 1;
1051 svc_mask &= ~HCuPINT_ISTL0;
1052 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1053 DBG(1, "%s: ISTL0\n", __func__);
1054 WARN_ON((int)!!isp1362_hcd->istl_flip);
1055 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1056 HCBUFSTAT_ISTL0_ACTIVE);
1057 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1058 HCBUFSTAT_ISTL0_DONE));
1059 isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1060 }
1061
1062 if (irqstat & HCuPINT_ISTL1) {
1063 isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1064 handled = 1;
1065 svc_mask &= ~HCuPINT_ISTL1;
1066 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1067 DBG(1, "%s: ISTL1\n", __func__);
1068 WARN_ON(!(int)isp1362_hcd->istl_flip);
1069 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1070 HCBUFSTAT_ISTL1_ACTIVE);
1071 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1072 HCBUFSTAT_ISTL1_DONE));
1073 isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1074 }
1075
1076 if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1077 WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1078 (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1079 finish_iso_transfers(isp1362_hcd,
1080 &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1081 start_iso_transfers(isp1362_hcd);
1082 isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1083 }
1084
1085 if (irqstat & HCuPINT_INTL) {
1086 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1087 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1088 isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1089
1090 DBG(2, "%s: INTL\n", __func__);
1091
1092 svc_mask &= ~HCuPINT_INTL;
1093
1094 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1095 if (~(done_map | skip_map) == 0)
1096
1097 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1098
1099 handled = 1;
1100 WARN_ON(!done_map);
1101 if (done_map) {
1102 DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1103 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1104 start_intl_transfers(isp1362_hcd);
1105 }
1106 }
1107
1108 if (irqstat & HCuPINT_ATL) {
1109 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1110 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1111 isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1112
1113 DBG(2, "%s: ATL\n", __func__);
1114
1115 svc_mask &= ~HCuPINT_ATL;
1116
1117 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1118 if (~(done_map | skip_map) == 0)
1119 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1120 if (done_map) {
1121 DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1122 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1123 start_atl_transfers(isp1362_hcd);
1124 }
1125 handled = 1;
1126 }
1127
1128 if (irqstat & HCuPINT_OPR) {
1129 u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1130 isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1131
1132 svc_mask &= ~HCuPINT_OPR;
1133 DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1134 intstat &= isp1362_hcd->intenb;
1135 if (intstat & OHCI_INTR_UE) {
1136 pr_err("Unrecoverable error\n");
1137
1138 }
1139 if (intstat & OHCI_INTR_RHSC) {
1140 isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1141 isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1142 isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1143 }
1144 if (intstat & OHCI_INTR_RD) {
1145 pr_info("%s: RESUME DETECTED\n", __func__);
1146 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1147 usb_hcd_resume_root_hub(hcd);
1148 }
1149 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1150 irqstat &= ~HCuPINT_OPR;
1151 handled = 1;
1152 }
1153
1154 if (irqstat & HCuPINT_SUSP) {
1155 isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1156 handled = 1;
1157 svc_mask &= ~HCuPINT_SUSP;
1158
1159 pr_info("%s: SUSPEND IRQ\n", __func__);
1160 }
1161
1162 if (irqstat & HCuPINT_CLKRDY) {
1163 isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1164 handled = 1;
1165 isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1166 svc_mask &= ~HCuPINT_CLKRDY;
1167 pr_info("%s: CLKRDY IRQ\n", __func__);
1168 }
1169
1170 if (svc_mask)
1171 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1172
1173 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1174 isp1362_hcd->irq_active--;
1175 spin_unlock(&isp1362_hcd->lock);
1176
1177 return IRQ_RETVAL(handled);
1178}
1179
1180
1181
1182#define MAX_PERIODIC_LOAD 900
1183static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1184{
1185 int i, branch = -ENOSPC;
1186
1187
1188
1189
1190 for (i = 0; i < interval; i++) {
1191 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1192 int j;
1193
1194 for (j = i; j < PERIODIC_SIZE; j += interval) {
1195 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1196 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1197 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1198 break;
1199 }
1200 }
1201 if (j < PERIODIC_SIZE)
1202 continue;
1203 branch = i;
1204 }
1205 }
1206 return branch;
1207}
1208
1209
1210
1211
1212
1213
1214
/*
 * Submit an URB to the controller.
 *
 * On first use of an endpoint, allocates and initialises the per-endpoint
 * state (struct isp1362_ep), places the endpoint on the matching schedule
 * list (async for control/bulk, periodic for interrupt, isoc for iso) and
 * kicks the corresponding transfer engine.
 *
 * Returns 0 on success or a negative errno.  Isochronous transfers are
 * rejected up front with -ENOSPC (not supported by this driver).
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
		usb_pipeint(pipe) ? "int" :
		usb_pipebulk(pipe) ? "bulk" :
		"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		/* endpoint already known; drop the speculative allocation path */
		ep = hep->hcpriv;
	} else {
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* align the first iso frame to the chosen branch */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the transfer engine for this pipe type */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1395
/*
 * Cancel a previously submitted URB.
 *
 * If the URB is at the head of its endpoint queue and has a PTD active
 * in the chip, the PTD is queued for removal and final completion is
 * deferred to the interrupt handler; otherwise the request is finished
 * immediately with the given status.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal;
				 * the IRQ handler will complete the URB later */
				remove_ptd(isp1362_hcd, ep);
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warn("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1447
/*
 * Release all driver state for an endpoint when the USB core disables it.
 *
 * Any active PTD is queued for removal under the lock; the function then
 * sleeps (lock dropped) until the interrupt handler has drained the
 * endpoint's active list before freeing the per-endpoint structure.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* kick off PTD removal unless one is already pending */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* wait for the IRQ handler to finish the active transfer */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1477
1478static int isp1362_get_frame(struct usb_hcd *hcd)
1479{
1480 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1481 u32 fmnum;
1482 unsigned long flags;
1483
1484 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1485 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1486 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1487
1488 return (int)fmnum;
1489}
1490
1491
1492
1493
/*
 * Report root-hub and per-port status changes to the USB core.
 *
 * Fills buf[0] with a bitmap: bit 0 for hub-level changes, bit (i+1)
 * for port i.  Returns the number of changes (0 if none) or -ESHUTDOWN
 * when the controller is not running.  Status is taken from the cached
 * rhstatus/rhport values maintained by the interrupt handler.
 */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -ESHUTDOWN;

	/* Report no status change now, if we are scheduled to be
	 * called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362_hcd->rhdesca & RH_A_NDP;
	BUG_ON(ports > 2);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* init status */
	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362_hcd->rhport[i];

		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
			continue;
		}

		/* NOTE(review): this check is a no-op at the end of the loop
		 * body; presumably a remnant of removed per-port handling */
		if (!(status & RH_PS_CCS))
			continue;
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return changed;
}
1534
1535static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1536 struct usb_hub_descriptor *desc)
1537{
1538 u32 reg = isp1362_hcd->rhdesca;
1539
1540 DBG(3, "%s: enter\n", __func__);
1541
1542 desc->bDescriptorType = USB_DT_HUB;
1543 desc->bDescLength = 9;
1544 desc->bHubContrCurrent = 0;
1545 desc->bNbrPorts = reg & 0x3;
1546
1547 desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
1548 (HUB_CHAR_LPSM |
1549 HUB_CHAR_COMPOUND |
1550 HUB_CHAR_OCPM));
1551 DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
1552 desc->wHubCharacteristics);
1553 desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1554
1555 desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1556 desc->u.hs.DeviceRemovable[1] = ~0;
1557
1558 DBG(3, "%s: exit\n", __func__);
1559}
1560
1561
/*
 * Handle hub-class control requests addressed to the virtual root hub.
 *
 * Returns 0 on success or -EPIPE (protocol stall) for requests the root
 * hub does not support.  Register accesses are serialised with the HCD
 * spinlock; port status caches (rhport[]) are refreshed after writes.
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through - both features are acknowledged */
		case C_HUB_LOCAL_POWER:
			DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			/* nothing to do; accepted as no-ops */
			DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* root hub never reports overcurrent/power change here */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* wIndex is 1-based in the USB request */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature selector to the write-to-clear bit */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;

			break;
		case USB_PORT_FEAT_C_CONNECTION:
			DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			/* keep asserting reset until the overall reset
			 * width has elapsed or the device disconnects */
			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any previous reset pulse is done */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					udelay(500);
				}
				/* port gone? abort the reset sequence */
				if (!(tmp & RH_PS_CCS))
					break;
				/* start the next reset pulse */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
				/* drop the lock while sleeping */
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}

	return retval;
}
1729
1730#ifdef CONFIG_PM
/*
 * Suspend the root hub: drain any active transfer schedules, then put
 * the OHCI functional state machine into USB_SUSPEND.
 *
 * Returns 0 on success or -EBUSY if the controller was in a state that
 * needs a full re-init (or refuses to enter suspend).
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* fall through - treat as if we were in reset */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warn("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warn("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip all PTDs, silence chip IRQs and wait for SOF */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* reap whatever completed while the schedules wound down */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* acknowledge all pending OHCI interrupts */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* verify the state change actually took effect */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		    isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resume needed for at least 5 msec */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
1828
/*
 * Resume the root hub from suspend: drive the OHCI functional state to
 * USB_RESUME, wake any suspended ports, wait out the resume signalling
 * window, then switch to USB_OPER.
 *
 * Returns 0 on success; falls back to a full stop/start of the
 * controller if it was found in a reset-like state.
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;

	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warn("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:
			/* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent erratum) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1908#else
1909#define isp1362_bus_suspend NULL
1910#define isp1362_bus_resume NULL
1911#endif
1912
1913
1914
1915static void dump_irq(struct seq_file *s, char *label, u16 mask)
1916{
1917 seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1918 mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1919 mask & HCuPINT_SUSP ? " susp" : "",
1920 mask & HCuPINT_OPR ? " opr" : "",
1921 mask & HCuPINT_EOT ? " eot" : "",
1922 mask & HCuPINT_ATL ? " atl" : "",
1923 mask & HCuPINT_SOF ? " sof" : "");
1924}
1925
1926static void dump_int(struct seq_file *s, char *label, u32 mask)
1927{
1928 seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1929 mask & OHCI_INTR_MIE ? " MIE" : "",
1930 mask & OHCI_INTR_RHSC ? " rhsc" : "",
1931 mask & OHCI_INTR_FNO ? " fno" : "",
1932 mask & OHCI_INTR_UE ? " ue" : "",
1933 mask & OHCI_INTR_RD ? " rd" : "",
1934 mask & OHCI_INTR_SF ? " sof" : "",
1935 mask & OHCI_INTR_SO ? " so" : "");
1936}
1937
1938static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1939{
1940 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1941 mask & OHCI_CTRL_RWC ? " rwc" : "",
1942 mask & OHCI_CTRL_RWE ? " rwe" : "",
1943 ({
1944 char *hcfs;
1945 switch (mask & OHCI_CTRL_HCFS) {
1946 case OHCI_USB_OPER:
1947 hcfs = " oper";
1948 break;
1949 case OHCI_USB_RESET:
1950 hcfs = " reset";
1951 break;
1952 case OHCI_USB_RESUME:
1953 hcfs = " resume";
1954 break;
1955 case OHCI_USB_SUSPEND:
1956 hcfs = " suspend";
1957 break;
1958 default:
1959 hcfs = " ?";
1960 }
1961 hcfs;
1962 }));
1963}
1964
/*
 * Dump all readable controller registers to a seq_file for debugfs.
 * Reads are live; the destructive HCDIRDATA and HCATLDONE reads are
 * compiled out (#if 0) since reading them has side effects.
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* OHCI operational registers */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* ISP1362-specific configuration registers */
	seq_printf(s, "HCHWCFG    [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT    [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	seq_printf(s, "HCDIRDATA  [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* INTL (interrupt transfer) buffer registers */
	seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* ATL (async transfer) buffer registers */
	seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2056
/*
 * seq_file show callback for the "isp1362" debugfs file: dump driver
 * statistics, controller registers and the async/periodic/iso endpoint
 * schedules.  s->private holds the isp1362_hcd set up by
 * debugfs_create_file().  Always returns 0.
 */
static int isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* buffer-access alignment and PTD FIFO high-water statistics
	 * accumulated by the driver at runtime */
	seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* Take the HCD lock: the register reads below and the schedule
	 * lists must not race with the interrupt handler. */
	spin_lock_irq(&isp1362_hcd->lock);

	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-source interrupt counters (only non-zero ones) */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	/* async (control/bulk) schedule with queued URBs */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   /* NB: this inner 's' shadows the seq_file 's'
				    * for the duration of the statement expression */
				   char *s;
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   }
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* interrupt schedule: branch/load/PTD placement per endpoint */
	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
2160
2161static int isp1362_open(struct inode *inode, struct file *file)
2162{
2163 return single_open(file, isp1362_show, inode);
2164}
2165
/* file_operations for the debugfs "isp1362" file; read side is handled
 * entirely by the seq_file helpers. */
static const struct file_operations debug_ops = {
	.open = isp1362_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2172
2173
2174static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2175{
2176 isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
2177 usb_debug_root,
2178 isp1362_hcd, &debug_ops);
2179}
2180
/* Remove the debugfs file created by create_debug_file(). */
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	debugfs_remove(isp1362_hcd->debug_file);
}
2185
2186
2187
2188static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2189{
2190 int tmp = 20;
2191
2192 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2193 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2194 while (--tmp) {
2195 mdelay(1);
2196 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2197 break;
2198 }
2199 if (!tmp)
2200 pr_err("Software reset timeout\n");
2201}
2202
/* Locked wrapper around __isp1362_sw_reset() for callers that do not
 * already hold isp1362_hcd->lock. */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	__isp1362_sw_reset(isp1362_hcd);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2211
2212static int isp1362_mem_config(struct usb_hcd *hcd)
2213{
2214 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2215 unsigned long flags;
2216 u32 total;
2217 u16 istl_size = ISP1362_ISTL_BUFSIZE;
2218 u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2219 u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2220 u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2221 u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2222 u16 atl_size;
2223 int i;
2224
2225 WARN_ON(istl_size & 3);
2226 WARN_ON(atl_blksize & 3);
2227 WARN_ON(intl_blksize & 3);
2228 WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2229 WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2230
2231 BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2232 if (atl_buffers > 32)
2233 atl_buffers = 32;
2234 atl_size = atl_buffers * atl_blksize;
2235 total = atl_size + intl_size + istl_size;
2236 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2237 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2238 istl_size / 2, istl_size, 0, istl_size / 2);
2239 dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2240 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2241 intl_size, istl_size);
2242 dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2243 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2244 atl_size, istl_size + intl_size);
2245 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
2246 ISP1362_BUF_SIZE - total);
2247
2248 if (total > ISP1362_BUF_SIZE) {
2249 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2250 __func__, total, ISP1362_BUF_SIZE);
2251 return -ENOMEM;
2252 }
2253
2254 total = istl_size + intl_size + atl_size;
2255 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2256
2257 for (i = 0; i < 2; i++) {
2258 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
2259 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2260 isp1362_hcd->istl_queue[i].blk_size = 4;
2261 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2262 snprintf(isp1362_hcd->istl_queue[i].name,
2263 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2264 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2265 isp1362_hcd->istl_queue[i].name,
2266 isp1362_hcd->istl_queue[i].buf_start,
2267 isp1362_hcd->istl_queue[i].buf_size);
2268 }
2269 isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2270
2271 isp1362_hcd->intl_queue.buf_start = istl_size;
2272 isp1362_hcd->intl_queue.buf_size = intl_size;
2273 isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2274 isp1362_hcd->intl_queue.blk_size = intl_blksize;
2275 isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2276 isp1362_hcd->intl_queue.skip_map = ~0;
2277 INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2278
2279 isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2280 isp1362_hcd->intl_queue.buf_size);
2281 isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2282 isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2283 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2284 isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2285 1 << (ISP1362_INTL_BUFFERS - 1));
2286
2287 isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2288 isp1362_hcd->atl_queue.buf_size = atl_size;
2289 isp1362_hcd->atl_queue.buf_count = atl_buffers;
2290 isp1362_hcd->atl_queue.blk_size = atl_blksize;
2291 isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2292 isp1362_hcd->atl_queue.skip_map = ~0;
2293 INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2294
2295 isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2296 isp1362_hcd->atl_queue.buf_size);
2297 isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2298 isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2299 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2300 isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2301 1 << (atl_buffers - 1));
2302
2303 snprintf(isp1362_hcd->atl_queue.name,
2304 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2305 snprintf(isp1362_hcd->intl_queue.name,
2306 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2307 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2308 isp1362_hcd->intl_queue.name,
2309 isp1362_hcd->intl_queue.buf_start,
2310 ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2311 isp1362_hcd->intl_queue.buf_size);
2312 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2313 isp1362_hcd->atl_queue.name,
2314 isp1362_hcd->atl_queue.buf_start,
2315 atl_buffers, isp1362_hcd->atl_queue.blk_size,
2316 isp1362_hcd->atl_queue.buf_size);
2317
2318 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2319
2320 return 0;
2321}
2322
/*
 * HCD .reset callback: reset the chip (via the board's reset hook when
 * available, otherwise a software reset) and wait up to 100 ms for the
 * internal clock to come up (HCuPINT_CLKRDY).
 *
 * Returns 0 on success, -ENODEV if the clock never signalled ready.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;	/* ms */
	unsigned long flags;
	int clkrdy = 0;

	pr_debug("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* board-level hardware reset: assert, optionally enable
		 * the clock, then deassert */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip clock spin-up time */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the clock-ready interrupt bit (write-1-to-clear) */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
2362
/*
 * HCD .stop callback: mask all chip interrupts, power down the root-hub
 * ports, put the chip into reset and stop its clock.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_debug("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all microprocessor interrupts */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* switch to global port power switching so the single LPS write
	 * below powers down all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* hold the chip in reset (board hook when available) */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		__isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2394
#ifdef CHIP_BUFFER_TEST
/*
 * Optional (compile-time gated) diagnostic: verify that the chip's
 * internal buffer memory can be written and read back reliably.
 *
 * Three phases:
 *  1. small transfers (1..8 bytes) at four different source alignments;
 *  2. one full-buffer write/read/compare;
 *  3. PTD-header-sized transfers at 256 different buffer offsets, with
 *     a retry read to distinguish persistent from transient failures.
 *
 * Returns 0 if the memory checks pass (or the scratch allocation
 * fails, which silently skips the test), -ENODEV on mismatch.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	/* first half: reference pattern; second half: readback scratch */
	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* phase 1: 1..8 byte transfers at alignments 0..3 */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					       __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* phase 2: whole-buffer round trip */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* phase 3: PTD-header transfers at 256 buffer offsets */
		for (offset = 0; offset < 256; offset++) {
			/* NOTE(review): test_size stays 0 throughout, so
			 * only the PTD header itself is exercised here */
			int test_size = 0;

			yield();

			/* clear the chip buffer and verify it reads back
			 * as all zeros before writing the pattern */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the read: only a repeated mismatch
				 * is treated as a hard failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					       __func__, offset);
					break;
				}
				pr_warn("%s: memory check with offset %02x ok after second read\n",
					__func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
#endif
2487
/*
 * HCD .start callback: verify the chip ID, program the hardware
 * configuration from the board platform data, lay out the buffer
 * memory, set up the root hub and frame interval, enable interrupts
 * and put the OHCI core into the operational state.
 *
 * Returns 0 on success, -ENODEV on a bad chip ID (or failed buffer
 * self-test), or the error from isp1362_mem_config().
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* clear and mask all pending microprocessor interrupts */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* assemble the hardware-config register from the board's
	 * platform data (polarities, clock, overcurrent, pulldowns) */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	/* DMA is not used by this driver */
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* root hub descriptor A: power switching mode and power-on-to-
	 * power-good time (default 50 ms = 25 * 2 ms units) */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* write without, then with, per-port overcurrent, and read back
	 * what the chip actually accepted */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* enable OHCI and microprocessor interrupt sources */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);

	/* enable global port power and remote-wakeup writes */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2591
2592
2593
/* USB core hc_driver operations for the ISP1362 (full/low speed,
 * programmed-I/O controller). */
static struct hc_driver isp1362_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"ISP1362 Host Controller",
	.hcd_priv_size =	sizeof(struct isp1362_hcd),

	.irq =			isp1362_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	.reset =		isp1362_hc_reset,
	.start =		isp1362_hc_start,
	.stop =			isp1362_hc_stop,

	.urb_enqueue =		isp1362_urb_enqueue,
	.urb_dequeue =		isp1362_urb_dequeue,
	.endpoint_disable =	isp1362_endpoint_disable,

	.get_frame_number =	isp1362_get_frame,

	.hub_status_data =	isp1362_hub_status_data,
	.hub_control =		isp1362_hub_control,
	.bus_suspend =		isp1362_bus_suspend,
	.bus_resume =		isp1362_bus_resume,
};
2617
2618
2619
/*
 * Platform driver remove: tear down debugfs, unregister the HCD and
 * drop the final reference (which frees the hcd and the embedded
 * isp1362_hcd).  I/O mappings were devm-allocated in probe and are
 * released automatically.
 */
static int isp1362_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);

	remove_debug_file(isp1362_hcd);
	DBG(0, "%s: Removing HCD\n", __func__);
	usb_remove_hcd(hcd);
	DBG(0, "%s: put_hcd\n", __func__);
	usb_put_hcd(hcd);
	DBG(0, "%s: Done\n", __func__);

	return 0;
}
2634
/*
 * Platform driver probe: map the chip's data and address ports, create
 * and register the HCD, and hook up the (possibly shared) interrupt.
 *
 * Expected resources: mem 0 = data port, mem 1 = address port,
 * irq 0 = controller interrupt.
 *
 * Returns 0 on success or a negative errno.
 */
static int isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *addr, *data, *irq_res;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	unsigned int irq_flags = 0;

	if (usb_disabled())
		return -ENODEV;

	/* basic sanity: need the two memory ranges plus the irq */
	if (pdev->num_resources < 3)
		return -ENODEV;

	/* this driver does programmed I/O only */
	if (pdev->dev.dma_mask) {
		DBG(1, "won't do DMA");
		return -ENODEV;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	irq = irq_res->start;

	/* mem resource 1: address (command) port */
	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	addr_reg = devm_ioremap_resource(&pdev->dev, addr);
	if (IS_ERR(addr_reg))
		return PTR_ERR(addr_reg);

	/* mem resource 0: data port */
	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data_reg = devm_ioremap_resource(&pdev->dev, data);
	if (IS_ERR(data_reg))
		return PTR_ERR(data_reg);

	/* allocate and initialize hcd (embeds our isp1362_hcd state) */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = dev_get_platdata(&pdev->dev);
#if USE_PLATFORM_DELAY
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err;
	}
#endif

	/* translate the irq resource's trigger flags into request flags */
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
	if (retval != 0)
		goto err;
	device_wakeup_enable(hcd->self.controller);

	dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

 err:
	usb_put_hcd(hcd);

	return retval;
}
2728
2729#ifdef CONFIG_PM
/*
 * Legacy platform PM suspend: for PM_EVENT_FREEZE suspend the root hub
 * through the bus_suspend path; for other events just power down the
 * root-hub ports.  Records the PM state on success.
 */
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event == PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	} else {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		/* global port power off */
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	}
	if (retval == 0)
		pdev->dev.power.power_state = state;
	return retval;
}
2752
/*
 * Legacy platform PM resume: mirror of isp1362_suspend().  If only the
 * ports were powered down (PM_EVENT_SUSPEND), power them back up;
 * otherwise resume the root hub through the bus_resume path.
 */
static int isp1362_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(0, "%s: Resuming\n", __func__);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		DBG(0, "%s: Resume RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		/* global port power on */
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return 0;
	}

	pdev->dev.power.power_state = PMSG_ON;

	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
}
2773#else
2774#define isp1362_suspend NULL
2775#define isp1362_resume NULL
2776#endif
2777
/* Platform driver glue; registered/unregistered by
 * module_platform_driver() below. */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = isp1362_remove,

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = hcd_name,
	},
};
2788
2789module_platform_driver(isp1362_driver);
2790