/*
 * ISP1362 HCD (Host Controller Driver) for USB.
 */
#undef ISP1362_DEBUG

#undef BUGGY_PXA2XX_UDC_USBTEST

#undef PTD_TRACE
#undef URB_TRACE
#undef VERBOSE
#undef REGISTERS

#undef CHIP_BUFFER_TEST
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/isp1362.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/bitmap.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
#endif

#include "../core/usb.h"
#include "isp1362.h"
#define DRIVER_VERSION "2005-04-04"
#define DRIVER_DESC "ISP1362 USB Host Controller Driver"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char hcd_name[] = "isp1362-hcd";

static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);
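/*
 * Enable additional sources in the HCuPINT interrupt enable mask.  Newly
 * enabled bits are acknowledged in HCuPINT first; the HCuPINTENB register
 * itself is only written when no interrupt is currently being serviced,
 * since the IRQ handler restores the accumulated mask on exit.
 */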
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}
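/*
 * Map a PTD buffer offset back to the queue (ISTL0, ISTL1, INTL or ATL)
 * whose buffer range contains it.
 */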
static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
						     u16 offset)
{
	struct isp1362_ep_queue *epq = NULL;

	if (offset < isp1362_hcd->istl_queue[1].buf_start)
		epq = &isp1362_hcd->istl_queue[0];
	else if (offset < isp1362_hcd->intl_queue.buf_start)
		epq = &isp1362_hcd->istl_queue[1];
	else if (offset < isp1362_hcd->atl_queue.buf_start)
		epq = &isp1362_hcd->intl_queue;
	else if (offset < isp1362_hcd->atl_queue.buf_start +
		 isp1362_hcd->atl_queue.buf_size)
		epq = &isp1362_hcd->atl_queue;

	if (epq)
		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
	else
		pr_warn("%s: invalid PTD $%04x\n", __func__, offset);

	return epq;
}
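/* Translate a PTD index within a queue into its offset in chip buffer memory. */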
static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
{
	int offset;

	if (index * epq->blk_size > epq->buf_size) {
		pr_warn("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
			epq->buf_size / epq->blk_size);
		return -EINVAL;
	}
	offset = epq->buf_start + index * epq->blk_size;
	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);

	return offset;
}
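/*
 * Limit a transfer to what currently fits into the queue's free PTD buffer
 * space; if the whole request does not fit, round the size down to a
 * multiple of the endpoint's max packet size.
 */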
static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
				    int mps)
{
	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);

	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
	if (xfer_size < size && xfer_size % mps)
		xfer_size -= xfer_size % mps;

	return xfer_size;
}
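/*
 * Reserve a contiguous run of PTD blocks large enough for the PTD header
 * plus @len payload bytes and record the index/offset in @ep.  Returns the
 * first block index, -ENOMEM if the queue has no free blocks at all, or
 * -EOVERFLOW if no sufficiently large contiguous region is available.
 */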
184static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
185 struct isp1362_ep *ep, u16 len)
186{
187 int ptd_offset = -EINVAL;
188 int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
189 int found;
190
191 BUG_ON(len > epq->buf_size);
192
193 if (!epq->buf_avail)
194 return -ENOMEM;
195
196 if (ep->num_ptds)
197 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
198 epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
199 BUG_ON(ep->num_ptds != 0);
200
201 found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
202 num_ptds, 0);
203 if (found >= epq->buf_count)
204 return -EOVERFLOW;
205
206 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
207 num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
208 ptd_offset = get_ptd_offset(epq, found);
209 WARN_ON(ptd_offset < 0);
210 ep->ptd_offset = ptd_offset;
211 ep->num_ptds += num_ptds;
212 epq->buf_avail -= num_ptds;
213 BUG_ON(epq->buf_avail > epq->buf_count);
214 ep->ptd_index = found;
215 bitmap_set(&epq->buf_map, found, num_ptds);
216 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
217 __func__, epq->name, ep->ptd_index, ep->ptd_offset,
218 epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
219
220 return found;
221}
222
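/*
 * Return an endpoint's PTD blocks to the queue's free pool, mark them in
 * the skip map and reset the endpoint's PTD bookkeeping.
 */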
223static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
224{
225 int last = ep->ptd_index + ep->num_ptds;
226
227 if (last > epq->buf_count)
228 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
229 __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
230 ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
231 epq->buf_map, epq->skip_map);
232 BUG_ON(last > epq->buf_count);
233
234 bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
235 bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
236 epq->buf_avail += ep->num_ptds;
237 epq->ptd_count--;
238
239 BUG_ON(epq->buf_avail > epq->buf_count);
240 BUG_ON(epq->ptd_count > epq->buf_count);
241
242 DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
243 __func__, epq->name,
244 ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
245 DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
246 epq->buf_map, epq->skip_map);
247
248 ep->num_ptds = 0;
249 ep->ptd_offset = -EINVAL;
250 ep->ptd_index = -EINVAL;
251}
252
253
254
255
256
257
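/*
 * Fill in the in-memory PTD header for the next token of @urb on @ep:
 * direction, data toggle, transfer length, device address and, for
 * periodic transfers, the start frame or polling rate.
 */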
258static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
259 struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
260 u16 fno)
261{
262 struct ptd *ptd;
263 int toggle;
264 int dir;
265 u16 len;
266 size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
267
268 DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
269
270 ptd = &ep->ptd;
271
272 ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
273
274 switch (ep->nextpid) {
275 case USB_PID_IN:
276 toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
277 dir = PTD_DIR_IN;
278 if (usb_pipecontrol(urb->pipe)) {
279 len = min_t(size_t, ep->maxpacket, buf_len);
280 } else if (usb_pipeisoc(urb->pipe)) {
281 len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
282 ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
283 } else
284 len = max_transfer_size(epq, buf_len, ep->maxpacket);
285 DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
286 (int)buf_len);
287 break;
288 case USB_PID_OUT:
289 toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
290 dir = PTD_DIR_OUT;
291 if (usb_pipecontrol(urb->pipe))
292 len = min_t(size_t, ep->maxpacket, buf_len);
293 else if (usb_pipeisoc(urb->pipe))
294 len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
295 else
296 len = max_transfer_size(epq, buf_len, ep->maxpacket);
297 if (len == 0)
298 pr_info("%s: Sending ZERO packet: %d\n", __func__,
299 urb->transfer_flags & URB_ZERO_PACKET);
300 DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
301 (int)buf_len);
302 break;
303 case USB_PID_SETUP:
304 toggle = 0;
305 dir = PTD_DIR_SETUP;
306 len = sizeof(struct usb_ctrlrequest);
307 DBG(1, "%s: SETUP len %d\n", __func__, len);
308 ep->data = urb->setup_packet;
309 break;
310 case USB_PID_ACK:
311 toggle = 1;
312 len = 0;
313 dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
314 PTD_DIR_OUT : PTD_DIR_IN;
315 DBG(1, "%s: ACK len %d\n", __func__, len);
316 break;
317 default:
318 toggle = dir = len = 0;
319 pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG();
321 }
322
323 ep->length = len;
324 if (!len)
325 ep->data = NULL;
326
327 ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
328 ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
329 PTD_EP(ep->epnum);
330 ptd->len = PTD_LEN(len) | PTD_DIR(dir);
331 ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
332
333 if (usb_pipeint(urb->pipe)) {
334 ptd->faddr |= PTD_SF_INT(ep->branch);
335 ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
336 }
337 if (usb_pipeisoc(urb->pipe))
338 ptd->faddr |= PTD_SF_ISO(fno);
339
340 DBG(1, "%s: Finished\n", __func__);
341}
342
343static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
344 struct isp1362_ep_queue *epq)
345{
346 struct ptd *ptd = &ep->ptd;
347 int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
348
349 prefetch(ptd);
350 isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
351 if (len)
352 isp1362_write_buffer(isp1362_hcd, ep->data,
353 ep->ptd_offset + PTD_HEADER_SIZE, len);
354
355 dump_ptd(ptd);
356 dump_ptd_out_data(ptd, ep->data);
357}
358
359static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
360 struct isp1362_ep_queue *epq)
361{
362 struct ptd *ptd = &ep->ptd;
363 int act_len;
364
365 WARN_ON(list_empty(&ep->active));
366 BUG_ON(ep->ptd_offset < 0);
367
368 list_del_init(&ep->active);
369 DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
370
371 prefetchw(ptd);
372 isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
373 dump_ptd(ptd);
374 act_len = PTD_GET_COUNT(ptd);
375 if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
376 return;
377 if (act_len > ep->length)
378 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
379 ep->ptd_offset, act_len, ep->length);
380 BUG_ON(act_len > ep->length);
381
382
383
384
385 prefetchw(ep->data);
386 isp1362_read_buffer(isp1362_hcd, ep->data,
387 ep->ptd_offset + PTD_HEADER_SIZE, act_len);
388 dump_ptd_in_data(ptd, ep->data);
389}
390
391
392
393
394
395
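/*
 * Take a PTD out of service: put the endpoint on the HCD's remove_list,
 * enable the SOF interrupt so cleanup runs at the next frame and, if the
 * PTD has a valid index, set its skip bit so the controller stops
 * processing it.
 */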
396static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
397
398{
399 int index;
400 struct isp1362_ep_queue *epq;
401
402 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
403 BUG_ON(ep->ptd_offset < 0);
404
405 epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
406 BUG_ON(!epq);
407
408
409 WARN_ON(!list_empty(&ep->remove_list));
410 list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
411
412 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
413
414 index = ep->ptd_index;
415 if (index < 0)
416
417 return;
418
419 DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
420 index, ep->ptd_offset, epq->skip_map, 1 << index);
421
422
423 epq->skip_map |= 1 << index;
424 if (epq == &isp1362_hcd->atl_queue) {
425 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
426 isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
427 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
428 if (~epq->skip_map == 0)
429 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
430 } else if (epq == &isp1362_hcd->intl_queue) {
431 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
432 isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
433 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
434 if (~epq->skip_map == 0)
435 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
436 }
437}
438
439
440
441
442
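/*
 * Hand a completed URB back to the USB core (temporarily dropping the HCD
 * lock) and, if this was the endpoint's last queued URB, take the endpoint
 * off its schedule and release any periodic bandwidth it claimed.
 */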
443static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
444 struct urb *urb, int status)
445 __releases(isp1362_hcd->lock)
446 __acquires(isp1362_hcd->lock)
447{
448 urb->hcpriv = NULL;
449 ep->error_count = 0;
450
451 if (usb_pipecontrol(urb->pipe))
452 ep->nextpid = USB_PID_SETUP;
453
454 URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
455 ep->num_req, usb_pipedevice(urb->pipe),
456 usb_pipeendpoint(urb->pipe),
457 !usb_pipein(urb->pipe) ? "out" : "in",
458 usb_pipecontrol(urb->pipe) ? "ctrl" :
459 usb_pipeint(urb->pipe) ? "int" :
460 usb_pipebulk(urb->pipe) ? "bulk" :
461 "iso",
462 urb->actual_length, urb->transfer_buffer_length,
463 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
464 "short_ok" : "", urb->status);
465
466
467 usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
468 spin_unlock(&isp1362_hcd->lock);
469 usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
470 spin_lock(&isp1362_hcd->lock);
471
472
473 if (!list_empty(&ep->hep->urb_list))
474 return;
475
476
477 if (!list_empty(&ep->schedule)) {
478 list_del_init(&ep->schedule);
479 return;
480 }
481
482
483 if (ep->interval) {
484
485 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
486 ep, ep->branch, ep->load,
487 isp1362_hcd->load[ep->branch],
488 isp1362_hcd->load[ep->branch] - ep->load);
489 isp1362_hcd->load[ep->branch] -= ep->load;
490 ep->branch = PERIODIC_SIZE;
491 }
492}
493
494
495
496
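/*
 * Evaluate the completion code and byte count of a finished PTD: update
 * the data toggle, actual_length and the next PID to be sent, and complete
 * the URB once its status is no longer -EINPROGRESS.
 */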
497static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
498{
499 struct urb *urb = get_urb(ep);
500 struct usb_device *udev;
501 struct ptd *ptd;
502 int short_ok;
503 u16 len;
504 int urbstat = -EINPROGRESS;
505 u8 cc;
506
507 DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
508
509 udev = urb->dev;
510 ptd = &ep->ptd;
511 cc = PTD_GET_CC(ptd);
512 if (cc == PTD_NOTACCESSED) {
513 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
514 ep->num_req, ptd);
515 cc = PTD_DEVNOTRESP;
516 }
517
518 short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
519 len = urb->transfer_buffer_length - urb->actual_length;
520
521
522
523
524
525
526
527 if (cc == PTD_DATAUNDERRUN) {
528 if (short_ok) {
529 DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
530 __func__, ep->num_req, short_ok ? "" : "not_",
531 PTD_GET_COUNT(ptd), ep->maxpacket, len);
532 cc = PTD_CC_NOERROR;
533 urbstat = 0;
534 } else {
535 DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
536 __func__, ep->num_req,
537 usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
538 short_ok ? "" : "not_",
539 PTD_GET_COUNT(ptd), ep->maxpacket, len);
540
541
542
543 urb->actual_length += PTD_GET_COUNT(ptd);
544 if (usb_pipecontrol(urb->pipe)) {
545 ep->nextpid = USB_PID_ACK;
546 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
547
548 if (urb->status == -EINPROGRESS)
549 urb->status = cc_to_error[PTD_DATAUNDERRUN];
550 } else {
551 usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
552 PTD_GET_TOGGLE(ptd));
553 urbstat = cc_to_error[PTD_DATAUNDERRUN];
554 }
555 goto out;
556 }
557 }
558
559 if (cc != PTD_CC_NOERROR) {
560 if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
561 urbstat = cc_to_error[cc];
562 DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
563 __func__, ep->num_req, ep->nextpid, urbstat, cc,
564 ep->error_count);
565 }
566 goto out;
567 }
568
569 switch (ep->nextpid) {
570 case USB_PID_OUT:
571 if (PTD_GET_COUNT(ptd) != ep->length)
572 pr_err("%s: count=%d len=%d\n", __func__,
573 PTD_GET_COUNT(ptd), ep->length);
574 BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
575 urb->actual_length += ep->length;
576 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
577 usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
578 if (urb->actual_length == urb->transfer_buffer_length) {
579 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
580 ep->num_req, len, ep->maxpacket, urbstat);
581 if (usb_pipecontrol(urb->pipe)) {
582 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
583 ep->num_req,
584 usb_pipein(urb->pipe) ? "IN" : "OUT");
585 ep->nextpid = USB_PID_ACK;
586 } else {
587 if (len % ep->maxpacket ||
588 !(urb->transfer_flags & URB_ZERO_PACKET)) {
589 urbstat = 0;
590 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
591 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
592 urbstat, len, ep->maxpacket, urb->actual_length);
593 }
594 }
595 }
596 break;
597 case USB_PID_IN:
598 len = PTD_GET_COUNT(ptd);
599 BUG_ON(len > ep->length);
600 urb->actual_length += len;
601 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
602 usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
603
604 if ((urb->transfer_buffer_length == urb->actual_length) ||
605 len % ep->maxpacket) {
606 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
607 ep->num_req, len, ep->maxpacket, urbstat);
608 if (usb_pipecontrol(urb->pipe)) {
609 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
610 ep->num_req,
611 usb_pipein(urb->pipe) ? "IN" : "OUT");
612 ep->nextpid = USB_PID_ACK;
613 } else {
614 urbstat = 0;
615 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
616 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
617 urbstat, len, ep->maxpacket, urb->actual_length);
618 }
619 }
620 break;
621 case USB_PID_SETUP:
622 if (urb->transfer_buffer_length == urb->actual_length) {
623 ep->nextpid = USB_PID_ACK;
624 } else if (usb_pipeout(urb->pipe)) {
625 usb_settoggle(udev, 0, 1, 1);
626 ep->nextpid = USB_PID_OUT;
627 } else {
628 usb_settoggle(udev, 0, 0, 1);
629 ep->nextpid = USB_PID_IN;
630 }
631 break;
632 case USB_PID_ACK:
633 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
634 urbstat);
635 WARN_ON(urbstat != -EINPROGRESS);
636 urbstat = 0;
637 ep->nextpid = 0;
638 break;
639 default:
		BUG();
641 }
642
643 out:
644 if (urbstat != -EINPROGRESS) {
645 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
646 ep, ep->num_req, urb, urbstat);
647 finish_request(isp1362_hcd, ep, urb, urbstat);
648 }
649}
650
651static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
652{
653 struct isp1362_ep *ep;
654 struct isp1362_ep *tmp;
655
656 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
657 struct isp1362_ep_queue *epq =
658 get_ptd_queue(isp1362_hcd, ep->ptd_offset);
659 int index = ep->ptd_index;
660
661 BUG_ON(epq == NULL);
662 if (index >= 0) {
663 DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
664 BUG_ON(ep->num_ptds == 0);
665 release_ptd_buffers(epq, ep);
666 }
667 if (!list_empty(&ep->hep->urb_list)) {
668 struct urb *urb = get_urb(ep);
669
670 DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
671 ep->num_req, ep);
672 finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
673 }
674 WARN_ON(list_empty(&ep->active));
675 if (!list_empty(&ep->active)) {
676 list_del_init(&ep->active);
677 DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
678 }
679 list_del_init(&ep->remove_list);
680 DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
681 }
682 DBG(1, "%s: Done\n", __func__);
683}
684
685static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
686{
687 if (count > 0) {
688 if (count < isp1362_hcd->atl_queue.ptd_count)
689 isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
690 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
691 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
692 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
693 } else
694 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
695}
696
697static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
698{
699 isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
700 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
701 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
702}
703
704static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
705{
706 isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
707 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
708 HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
709}
710
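/*
 * Prepare the next PTD for @urb, claim buffer space for it, write it into
 * chip memory, add @ep to the queue's active list and clear its skip bit.
 * Returns 0 on success or the -ENOMEM/-EOVERFLOW result of the buffer
 * allocation.
 */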
711static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
712 struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
713{
714 int index = epq->free_ptd;
715
716 prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
717 index = claim_ptd_buffers(epq, ep, ep->length);
718 if (index == -ENOMEM) {
719 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
720 ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
721 return index;
722 } else if (index == -EOVERFLOW) {
723 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
724 __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
725 epq->buf_map, epq->skip_map);
726 return index;
727 } else
728 BUG_ON(index < 0);
729 list_add_tail(&ep->active, &epq->active);
730 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
731 ep, ep->num_req, ep->length, &epq->active);
732 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
733 ep->ptd_offset, ep, ep->num_req);
734 isp1362_write_ptd(isp1362_hcd, ep, epq);
735 __clear_bit(ep->ptd_index, &epq->skip_map);
736
737 return 0;
738}
739
740static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
741{
742 int ptd_count = 0;
743 struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
744 struct isp1362_ep *ep;
745 int defer = 0;
746
747 if (atomic_read(&epq->finishing)) {
748 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
749 return;
750 }
751
752 list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
753 struct urb *urb = get_urb(ep);
754 int ret;
755
756 if (!list_empty(&ep->active)) {
757 DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
758 continue;
759 }
760
761 DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
762 ep, ep->num_req);
763
764 ret = submit_req(isp1362_hcd, urb, ep, epq);
765 if (ret == -ENOMEM) {
766 defer = 1;
767 break;
768 } else if (ret == -EOVERFLOW) {
769 defer = 1;
770 continue;
771 }
772#ifdef BUGGY_PXA2XX_UDC_USBTEST
773 defer = ep->nextpid == USB_PID_SETUP;
774#endif
775 ptd_count++;
776 }
777
778
779 if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
780 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
781 list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
782 }
783 if (ptd_count || defer)
784 enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
785
786 epq->ptd_count += ptd_count;
787 if (epq->ptd_count > epq->stat_maxptds) {
788 epq->stat_maxptds = epq->ptd_count;
789 DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
790 }
791}
792
793static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
794{
795 int ptd_count = 0;
796 struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
797 struct isp1362_ep *ep;
798
799 if (atomic_read(&epq->finishing)) {
800 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
801 return;
802 }
803
804 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
805 struct urb *urb = get_urb(ep);
806 int ret;
807
808 if (!list_empty(&ep->active)) {
809 DBG(1, "%s: Skipping active %s ep %p\n", __func__,
810 epq->name, ep);
811 continue;
812 }
813
814 DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
815 epq->name, ep, ep->num_req);
816 ret = submit_req(isp1362_hcd, urb, ep, epq);
817 if (ret == -ENOMEM)
818 break;
819 else if (ret == -EOVERFLOW)
820 continue;
821 ptd_count++;
822 }
823
824 if (ptd_count) {
825 static int last_count;
826
827 if (ptd_count != last_count) {
828 DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
829 last_count = ptd_count;
830 }
831 enable_intl_transfers(isp1362_hcd);
832 }
833
834 epq->ptd_count += ptd_count;
835 if (epq->ptd_count > epq->stat_maxptds)
836 epq->stat_maxptds = epq->ptd_count;
837}
838
839static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
840{
841 u16 ptd_offset = ep->ptd_offset;
842 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
843
844 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
845 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
846
847 ptd_offset += num_ptds * epq->blk_size;
848 if (ptd_offset < epq->buf_start + epq->buf_size)
849 return ptd_offset;
850 else
851 return -ENOMEM;
852}
853
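/*
 * Fill the ISTL buffer selected by istl_flip with PTDs for isochronous
 * URBs whose start frame is due; if the other ISTL buffer has already been
 * consumed by the controller, fill that one for the following frame too.
 */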
854static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
855{
856 int ptd_count = 0;
857 int flip = isp1362_hcd->istl_flip;
858 struct isp1362_ep_queue *epq;
859 int ptd_offset;
860 struct isp1362_ep *ep;
861 struct isp1362_ep *tmp;
862 u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
863
864 fill2:
865 epq = &isp1362_hcd->istl_queue[flip];
866 if (atomic_read(&epq->finishing)) {
867 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
868 return;
869 }
870
871 if (!list_empty(&epq->active))
872 return;
873
874 ptd_offset = epq->buf_start;
875 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
876 struct urb *urb = get_urb(ep);
877 s16 diff = fno - (u16)urb->start_frame;
878
879 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
880
881 if (diff > urb->number_of_packets) {
882
883 finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
884 continue;
885 } else if (diff < -1) {
886
887
888
889
890
891 } else if (diff == -1) {
892
893 prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
894 if (ptd_offset + PTD_HEADER_SIZE + ep->length >
895 epq->buf_start + epq->buf_size) {
896 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
897 __func__, ep->length);
898 continue;
899 }
900 ep->ptd_offset = ptd_offset;
901 list_add_tail(&ep->active, &epq->active);
902
903 ptd_offset = next_ptd(epq, ep);
904 if (ptd_offset < 0) {
				pr_warn("%s: req %d No more %s PTD buffers available\n",
					__func__, ep->num_req, epq->name);
907 break;
908 }
909 }
910 }
911 list_for_each_entry(ep, &epq->active, active) {
912 if (epq->active.next == &ep->active)
913 ep->ptd.mps |= PTD_LAST_MSK;
914 isp1362_write_ptd(isp1362_hcd, ep, epq);
915 ptd_count++;
916 }
917
918 if (ptd_count)
919 enable_istl_transfers(isp1362_hcd, flip);
920
921 epq->ptd_count += ptd_count;
922 if (epq->ptd_count > epq->stat_maxptds)
923 epq->stat_maxptds = epq->ptd_count;
924
925
926 if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
927 (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
928 fno++;
929 ptd_count = 0;
930 flip = 1 - flip;
931 goto fill2;
932 }
933}
934
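/*
 * Post-process the PTDs of @epq that the controller has marked done in
 * @done_map: read back the results, release the buffer blocks and run URB
 * completion handling for each affected endpoint.
 */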
935static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
936 struct isp1362_ep_queue *epq)
937{
938 struct isp1362_ep *ep;
939 struct isp1362_ep *tmp;
940
941 if (list_empty(&epq->active)) {
942 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
943 return;
944 }
945
946 DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
947
948 atomic_inc(&epq->finishing);
949 list_for_each_entry_safe(ep, tmp, &epq->active, active) {
950 int index = ep->ptd_index;
951
952 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
953 index, ep->ptd_offset);
954
955 BUG_ON(index < 0);
956 if (__test_and_clear_bit(index, &done_map)) {
957 isp1362_read_ptd(isp1362_hcd, ep, epq);
958 epq->free_ptd = index;
959 BUG_ON(ep->num_ptds == 0);
960 release_ptd_buffers(epq, ep);
961
962 DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
963 ep, ep->num_req);
964 if (!list_empty(&ep->remove_list)) {
965 list_del_init(&ep->remove_list);
966 DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
967 }
968 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
969 ep, ep->num_req);
970 postproc_ep(isp1362_hcd, ep);
971 }
972 if (!done_map)
973 break;
974 }
	if (done_map)
		pr_warn("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
			epq->skip_map);
978 atomic_dec(&epq->finishing);
979}
980
981static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
982{
983 struct isp1362_ep *ep;
984 struct isp1362_ep *tmp;
985
986 if (list_empty(&epq->active)) {
987 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
988 return;
989 }
990
991 DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
992
993 atomic_inc(&epq->finishing);
994 list_for_each_entry_safe(ep, tmp, &epq->active, active) {
995 DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
996
997 isp1362_read_ptd(isp1362_hcd, ep, epq);
998 DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
999 postproc_ep(isp1362_hcd, ep);
1000 }
1001 WARN_ON(epq->blk_size != 0);
1002 atomic_dec(&epq->finishing);
1003}
1004
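/*
 * Interrupt handler: acknowledge the pending HCuPINT sources, finish
 * completed ATL/INTL/ISTL transfers and restart the corresponding
 * schedules, and handle OHCI-level events (root hub status change, resume
 * detect, unrecoverable error) signalled through HCuPINT_OPR.
 */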
1005static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1006{
1007 int handled = 0;
1008 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1009 u16 irqstat;
1010 u16 svc_mask;
1011
1012 spin_lock(&isp1362_hcd->lock);
1013
1014 BUG_ON(isp1362_hcd->irq_active++);
1015
1016 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1017
1018 irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1019 DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1020
1021
1022 irqstat &= isp1362_hcd->irqenb;
1023 isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1024 svc_mask = irqstat;
1025
1026 if (irqstat & HCuPINT_SOF) {
1027 isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1028 isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1029 handled = 1;
1030 svc_mask &= ~HCuPINT_SOF;
1031 DBG(3, "%s: SOF\n", __func__);
1032 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1033 if (!list_empty(&isp1362_hcd->remove_list))
1034 finish_unlinks(isp1362_hcd);
1035 if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1036 if (list_empty(&isp1362_hcd->atl_queue.active)) {
1037 start_atl_transfers(isp1362_hcd);
1038 } else {
1039 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1040 isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1041 isp1362_hcd->atl_queue.skip_map);
1042 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1043 }
1044 }
1045 }
1046
1047 if (irqstat & HCuPINT_ISTL0) {
1048 isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1049 handled = 1;
1050 svc_mask &= ~HCuPINT_ISTL0;
1051 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1052 DBG(1, "%s: ISTL0\n", __func__);
1053 WARN_ON((int)!!isp1362_hcd->istl_flip);
1054 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1055 HCBUFSTAT_ISTL0_ACTIVE);
1056 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1057 HCBUFSTAT_ISTL0_DONE));
1058 isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1059 }
1060
1061 if (irqstat & HCuPINT_ISTL1) {
1062 isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1063 handled = 1;
1064 svc_mask &= ~HCuPINT_ISTL1;
1065 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1066 DBG(1, "%s: ISTL1\n", __func__);
1067 WARN_ON(!(int)isp1362_hcd->istl_flip);
1068 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1069 HCBUFSTAT_ISTL1_ACTIVE);
1070 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1071 HCBUFSTAT_ISTL1_DONE));
1072 isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1073 }
1074
1075 if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1076 WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1077 (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1078 finish_iso_transfers(isp1362_hcd,
1079 &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1080 start_iso_transfers(isp1362_hcd);
1081 isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1082 }
1083
1084 if (irqstat & HCuPINT_INTL) {
1085 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1086 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1087 isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1088
1089 DBG(2, "%s: INTL\n", __func__);
1090
1091 svc_mask &= ~HCuPINT_INTL;
1092
1093 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1094 if (~(done_map | skip_map) == 0)
1095
1096 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1097
1098 handled = 1;
1099 WARN_ON(!done_map);
1100 if (done_map) {
1101 DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1102 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1103 start_intl_transfers(isp1362_hcd);
1104 }
1105 }
1106
1107 if (irqstat & HCuPINT_ATL) {
1108 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1109 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1110 isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1111
1112 DBG(2, "%s: ATL\n", __func__);
1113
1114 svc_mask &= ~HCuPINT_ATL;
1115
1116 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1117 if (~(done_map | skip_map) == 0)
1118 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1119 if (done_map) {
1120 DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1121 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1122 start_atl_transfers(isp1362_hcd);
1123 }
1124 handled = 1;
1125 }
1126
1127 if (irqstat & HCuPINT_OPR) {
1128 u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1129 isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1130
1131 svc_mask &= ~HCuPINT_OPR;
1132 DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1133 intstat &= isp1362_hcd->intenb;
1134 if (intstat & OHCI_INTR_UE) {
1135 pr_err("Unrecoverable error\n");
1136
1137 }
1138 if (intstat & OHCI_INTR_RHSC) {
1139 isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1140 isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1141 isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1142 }
1143 if (intstat & OHCI_INTR_RD) {
1144 pr_info("%s: RESUME DETECTED\n", __func__);
1145 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1146 usb_hcd_resume_root_hub(hcd);
1147 }
1148 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1149 irqstat &= ~HCuPINT_OPR;
1150 handled = 1;
1151 }
1152
1153 if (irqstat & HCuPINT_SUSP) {
1154 isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1155 handled = 1;
1156 svc_mask &= ~HCuPINT_SUSP;
1157
1158 pr_info("%s: SUSPEND IRQ\n", __func__);
1159 }
1160
1161 if (irqstat & HCuPINT_CLKRDY) {
1162 isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1163 handled = 1;
1164 isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1165 svc_mask &= ~HCuPINT_CLKRDY;
1166 pr_info("%s: CLKRDY IRQ\n", __func__);
1167 }
1168
1169 if (svc_mask)
1170 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1171
1172 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1173 isp1362_hcd->irq_active--;
1174 spin_unlock(&isp1362_hcd->lock);
1175
1176 return IRQ_RETVAL(handled);
1177}
1178
1179
1180
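/*
 * Find the least loaded slot in the periodic schedule that can accept an
 * endpoint polled every @interval frames without pushing any of the frames
 * it would occupy above MAX_PERIODIC_LOAD (microseconds per frame).
 */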
1181#define MAX_PERIODIC_LOAD 900
1182static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1183{
1184 int i, branch = -ENOSPC;
1185
1186
1187
1188
1189 for (i = 0; i < interval; i++) {
1190 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1191 int j;
1192
1193 for (j = i; j < PERIODIC_SIZE; j += interval) {
1194 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1195 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1196 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1197 break;
1198 }
1199 }
1200 if (j < PERIODIC_SIZE)
1201 continue;
1202 branch = i;
1203 }
1204 }
1205 return branch;
1206}
1207
1208
1209
1210
1211
1212
1213
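/*
 * Queue an URB: allocate and initialise the private isp1362_ep on first
 * use, link the endpoint onto the async, periodic or isoc schedule and
 * kick the matching transfer list.
 */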
1214static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1215 struct urb *urb,
1216 gfp_t mem_flags)
1217{
1218 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1219 struct usb_device *udev = urb->dev;
1220 unsigned int pipe = urb->pipe;
1221 int is_out = !usb_pipein(pipe);
1222 int type = usb_pipetype(pipe);
1223 int epnum = usb_pipeendpoint(pipe);
1224 struct usb_host_endpoint *hep = urb->ep;
1225 struct isp1362_ep *ep = NULL;
1226 unsigned long flags;
1227 int retval = 0;
1228
1229 DBG(3, "%s: urb %p\n", __func__, urb);
1230
1231 if (type == PIPE_ISOCHRONOUS) {
1232 pr_err("Isochronous transfers not supported\n");
1233 return -ENOSPC;
1234 }
1235
1236 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1237 usb_pipedevice(pipe), epnum,
1238 is_out ? "out" : "in",
1239 usb_pipecontrol(pipe) ? "ctrl" :
1240 usb_pipeint(pipe) ? "int" :
1241 usb_pipebulk(pipe) ? "bulk" :
1242 "iso",
1243 urb->transfer_buffer_length,
1244 (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1245 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1246 "short_ok" : "");
1247
1248
1249 if (!hep->hcpriv) {
1250 ep = kzalloc(sizeof *ep, mem_flags);
1251 if (!ep)
1252 return -ENOMEM;
1253 }
1254 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1255
1256
1257 if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1258 USB_PORT_STAT_ENABLE) ||
1259 !HC_IS_RUNNING(hcd->state)) {
1260 kfree(ep);
1261 retval = -ENODEV;
1262 goto fail_not_linked;
1263 }
1264
1265 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1266 if (retval) {
1267 kfree(ep);
1268 goto fail_not_linked;
1269 }
1270
1271 if (hep->hcpriv) {
1272 ep = hep->hcpriv;
1273 } else {
1274 INIT_LIST_HEAD(&ep->schedule);
1275 INIT_LIST_HEAD(&ep->active);
1276 INIT_LIST_HEAD(&ep->remove_list);
1277 ep->udev = usb_get_dev(udev);
1278 ep->hep = hep;
1279 ep->epnum = epnum;
1280 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1281 ep->ptd_offset = -EINVAL;
1282 ep->ptd_index = -EINVAL;
1283 usb_settoggle(udev, epnum, is_out, 0);
1284
1285 if (type == PIPE_CONTROL)
1286 ep->nextpid = USB_PID_SETUP;
1287 else if (is_out)
1288 ep->nextpid = USB_PID_OUT;
1289 else
1290 ep->nextpid = USB_PID_IN;
1291
1292 switch (type) {
1293 case PIPE_ISOCHRONOUS:
1294 case PIPE_INTERRUPT:
1295 if (urb->interval > PERIODIC_SIZE)
1296 urb->interval = PERIODIC_SIZE;
1297 ep->interval = urb->interval;
1298 ep->branch = PERIODIC_SIZE;
1299 ep->load = usb_calc_bus_time(udev->speed, !is_out,
1300 (type == PIPE_ISOCHRONOUS),
1301 usb_maxpacket(udev, pipe, is_out)) / 1000;
1302 break;
1303 }
1304 hep->hcpriv = ep;
1305 }
1306 ep->num_req = isp1362_hcd->req_serial++;
1307
1308
1309 switch (type) {
1310 case PIPE_CONTROL:
1311 case PIPE_BULK:
1312 if (list_empty(&ep->schedule)) {
1313 DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1314 __func__, ep, ep->num_req);
1315 list_add_tail(&ep->schedule, &isp1362_hcd->async);
1316 }
1317 break;
1318 case PIPE_ISOCHRONOUS:
1319 case PIPE_INTERRUPT:
1320 urb->interval = ep->interval;
1321
1322
1323 if (ep->branch < PERIODIC_SIZE)
1324 break;
1325
1326 retval = balance(isp1362_hcd, ep->interval, ep->load);
1327 if (retval < 0) {
1328 pr_err("%s: balance returned %d\n", __func__, retval);
1329 goto fail;
1330 }
1331 ep->branch = retval;
1332 retval = 0;
1333 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1334 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1335 __func__, isp1362_hcd->fmindex, ep->branch,
1336 ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1337 ~(PERIODIC_SIZE - 1)) + ep->branch,
1338 (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1339
1340 if (list_empty(&ep->schedule)) {
1341 if (type == PIPE_ISOCHRONOUS) {
1342 u16 frame = isp1362_hcd->fmindex;
1343
1344 frame += max_t(u16, 8, ep->interval);
1345 frame &= ~(ep->interval - 1);
1346 frame |= ep->branch;
1347 if (frame_before(frame, isp1362_hcd->fmindex))
1348 frame += ep->interval;
1349 urb->start_frame = frame;
1350
1351 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1352 list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1353 } else {
1354 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1355 list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1356 }
1357 } else
1358 DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1359
1360 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1361 ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1362 isp1362_hcd->load[ep->branch] + ep->load);
1363 isp1362_hcd->load[ep->branch] += ep->load;
1364 }
1365
1366 urb->hcpriv = hep;
1367 ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1368
1369 switch (type) {
1370 case PIPE_CONTROL:
1371 case PIPE_BULK:
1372 start_atl_transfers(isp1362_hcd);
1373 break;
1374 case PIPE_INTERRUPT:
1375 start_intl_transfers(isp1362_hcd);
1376 break;
1377 case PIPE_ISOCHRONOUS:
1378 start_iso_transfers(isp1362_hcd);
1379 break;
1380 default:
1381 BUG();
1382 }
1383 fail:
1384 if (retval)
1385 usb_hcd_unlink_urb_from_ep(hcd, urb);
1386
1387
1388 fail_not_linked:
1389 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1390 if (retval)
1391 DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1392 return retval;
1393}
1394
1395static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1396{
1397 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1398 struct usb_host_endpoint *hep;
1399 unsigned long flags;
1400 struct isp1362_ep *ep;
1401 int retval = 0;
1402
1403 DBG(3, "%s: urb %p\n", __func__, urb);
1404
1405 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1406 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1407 if (retval)
1408 goto done;
1409
1410 hep = urb->hcpriv;
1411
1412 if (!hep) {
1413 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1414 return -EIDRM;
1415 }
1416
1417 ep = hep->hcpriv;
1418 if (ep) {
1419
1420 if (ep->hep->urb_list.next == &urb->urb_list) {
1421 if (!list_empty(&ep->active)) {
1422 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1423 urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1424
1425 remove_ptd(isp1362_hcd, ep);
1426 urb = NULL;
1427 }
1428 }
1429 if (urb) {
1430 DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1431 ep->num_req);
1432 finish_request(isp1362_hcd, ep, urb, status);
1433 } else
1434 DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1435 } else {
		pr_warn("%s: No EP in URB %p\n", __func__, urb);
1437 retval = -EINVAL;
1438 }
1439done:
1440 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1441
1442 DBG(3, "%s: exit\n", __func__);
1443
1444 return retval;
1445}
1446
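/*
 * Endpoint shutdown: ask the interrupt handler to reclaim any PTD the
 * controller still owns, wait until the endpoint leaves the active list,
 * then free its private state.
 */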
1447static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1448{
1449 struct isp1362_ep *ep = hep->hcpriv;
1450 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1451 unsigned long flags;
1452
1453 DBG(1, "%s: ep %p\n", __func__, ep);
1454 if (!ep)
1455 return;
1456 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1457 if (!list_empty(&hep->urb_list)) {
1458 if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1459 DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1460 ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1461 remove_ptd(isp1362_hcd, ep);
1462 pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1463 }
1464 }
1465 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1466
1467 while (!list_empty(&ep->active))
1468 msleep(1);
1469
1470 DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1471
1472 usb_put_dev(ep->udev);
1473 kfree(ep);
1474 hep->hcpriv = NULL;
1475}
1476
1477static int isp1362_get_frame(struct usb_hcd *hcd)
1478{
1479 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1480 u32 fmnum;
1481 unsigned long flags;
1482
1483 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1484 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1485 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1486
1487 return (int)fmnum;
1488}
1489
1490
1491
1492
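/*
 * Root hub support: report which ports have pending status changes, based
 * on the hub and port status values cached by the interrupt handler.
 */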
1493static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1494{
1495 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1496 int ports, i, changed = 0;
1497 unsigned long flags;
1498
1499 if (!HC_IS_RUNNING(hcd->state))
1500 return -ESHUTDOWN;
1501
1502
1503
1504 if (timer_pending(&hcd->rh_timer))
1505 return 0;
1506
1507 ports = isp1362_hcd->rhdesca & RH_A_NDP;
1508 BUG_ON(ports > 2);
1509
1510 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1511
1512 if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1513 buf[0] = changed = 1;
1514 else
1515 buf[0] = 0;
1516
1517 for (i = 0; i < ports; i++) {
1518 u32 status = isp1362_hcd->rhport[i];
1519
1520 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1521 RH_PS_OCIC | RH_PS_PRSC)) {
1522 changed = 1;
1523 buf[0] |= 1 << (i + 1);
1524 continue;
1525 }
1526
1527 if (!(status & RH_PS_CCS))
1528 continue;
1529 }
1530 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1531 return changed;
1532}
1533
1534static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1535 struct usb_hub_descriptor *desc)
1536{
1537 u32 reg = isp1362_hcd->rhdesca;
1538
1539 DBG(3, "%s: enter\n", __func__);
1540
1541 desc->bDescriptorType = USB_DT_HUB;
1542 desc->bDescLength = 9;
1543 desc->bHubContrCurrent = 0;
1544 desc->bNbrPorts = reg & 0x3;
1545
1546 desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
1547 (HUB_CHAR_LPSM |
1548 HUB_CHAR_COMPOUND |
1549 HUB_CHAR_OCPM));
1550 DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
1551 desc->wHubCharacteristics);
1552 desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1553
1554 desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1555 desc->u.hs.DeviceRemovable[1] = ~0;
1556
1557 DBG(3, "%s: exit\n", __func__);
1558}
1559
1560
1561static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1562 u16 wIndex, char *buf, u16 wLength)
1563{
1564 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1565 int retval = 0;
1566 unsigned long flags;
1567 unsigned long t1;
1568 int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1569 u32 tmp = 0;
1570
1571 switch (typeReq) {
1572 case ClearHubFeature:
1573 DBG(0, "ClearHubFeature: ");
1574 switch (wValue) {
		case C_HUB_OVER_CURRENT:
			DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through */
		case C_HUB_LOCAL_POWER:
1581 DBG(0, "C_HUB_LOCAL_POWER\n");
1582 break;
1583 default:
1584 goto error;
1585 }
1586 break;
1587 case SetHubFeature:
1588 DBG(0, "SetHubFeature: ");
1589 switch (wValue) {
1590 case C_HUB_OVER_CURRENT:
1591 case C_HUB_LOCAL_POWER:
1592 DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1593 break;
1594 default:
1595 goto error;
1596 }
1597 break;
1598 case GetHubDescriptor:
1599 DBG(0, "GetHubDescriptor\n");
1600 isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1601 break;
1602 case GetHubStatus:
1603 DBG(0, "GetHubStatus\n");
1604 put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1605 break;
1606 case GetPortStatus:
1607#ifndef VERBOSE
1608 DBG(0, "GetPortStatus\n");
1609#endif
1610 if (!wIndex || wIndex > ports)
1611 goto error;
1612 tmp = isp1362_hcd->rhport[--wIndex];
1613 put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1614 break;
1615 case ClearPortFeature:
1616 DBG(0, "ClearPortFeature: ");
1617 if (!wIndex || wIndex > ports)
1618 goto error;
1619 wIndex--;
1620
1621 switch (wValue) {
1622 case USB_PORT_FEAT_ENABLE:
1623 DBG(0, "USB_PORT_FEAT_ENABLE\n");
1624 tmp = RH_PS_CCS;
1625 break;
1626 case USB_PORT_FEAT_C_ENABLE:
1627 DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1628 tmp = RH_PS_PESC;
1629 break;
1630 case USB_PORT_FEAT_SUSPEND:
1631 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1632 tmp = RH_PS_POCI;
1633 break;
1634 case USB_PORT_FEAT_C_SUSPEND:
1635 DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1636 tmp = RH_PS_PSSC;
1637 break;
1638 case USB_PORT_FEAT_POWER:
1639 DBG(0, "USB_PORT_FEAT_POWER\n");
1640 tmp = RH_PS_LSDA;
1641
1642 break;
1643 case USB_PORT_FEAT_C_CONNECTION:
1644 DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1645 tmp = RH_PS_CSC;
1646 break;
1647 case USB_PORT_FEAT_C_OVER_CURRENT:
1648 DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1649 tmp = RH_PS_OCIC;
1650 break;
1651 case USB_PORT_FEAT_C_RESET:
1652 DBG(0, "USB_PORT_FEAT_C_RESET\n");
1653 tmp = RH_PS_PRSC;
1654 break;
1655 default:
1656 goto error;
1657 }
1658
1659 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1660 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1661 isp1362_hcd->rhport[wIndex] =
1662 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1663 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1664 break;
1665 case SetPortFeature:
1666 DBG(0, "SetPortFeature: ");
1667 if (!wIndex || wIndex > ports)
1668 goto error;
1669 wIndex--;
1670 switch (wValue) {
1671 case USB_PORT_FEAT_SUSPEND:
1672 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1673 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1674 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1675 isp1362_hcd->rhport[wIndex] =
1676 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1677 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1678 break;
1679 case USB_PORT_FEAT_POWER:
1680 DBG(0, "USB_PORT_FEAT_POWER\n");
1681 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1682 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1683 isp1362_hcd->rhport[wIndex] =
1684 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1685 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1686 break;
1687 case USB_PORT_FEAT_RESET:
1688 DBG(0, "USB_PORT_FEAT_RESET\n");
1689 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1690
1691 t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1692 while (time_before(jiffies, t1)) {
1693
1694 for (;;) {
1695 tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1696 if (!(tmp & RH_PS_PRS))
1697 break;
1698 udelay(500);
1699 }
1700 if (!(tmp & RH_PS_CCS))
1701 break;
1702
1703 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1704
1705 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1706 msleep(10);
1707 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1708 }
1709
1710 isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1711 HCRHPORT1 + wIndex);
1712 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1713 break;
1714 default:
1715 goto error;
1716 }
1717 break;
1718
1719 default:
1720 error:
1721
1722 DBG(0, "PROTOCOL STALL\n");
1723 retval = -EPIPE;
1724 }
1725
1726 return retval;
1727}
1728
1729#ifdef CONFIG_PM
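/*
 * Root hub suspend: stop the ATL/INTL/ISTL schedules, drain any transfers
 * that are still pending, then put the OHCI core into the USB SUSPEND
 * state.
 */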
1730static int isp1362_bus_suspend(struct usb_hcd *hcd)
1731{
1732 int status = 0;
1733 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1734 unsigned long flags;
1735
1736 if (time_before(jiffies, isp1362_hcd->next_statechange))
1737 msleep(5);
1738
1739 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1740
1741 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1742 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* fall through */
	case OHCI_USB_RESET:
1750 status = -EBUSY;
		pr_warn("%s: needs reinit!\n", __func__);
1752 goto done;
1753 case OHCI_USB_SUSPEND:
		pr_warn("%s: already suspended?\n", __func__);
1755 goto done;
1756 }
1757 DBG(0, "%s: suspend root hub\n", __func__);
1758
1759
1760 hcd->state = HC_STATE_QUIESCING;
1761 if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1762 !list_empty(&isp1362_hcd->intl_queue.active) ||
1763 !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
1764 !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
1765 int limit;
1766
1767 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1768 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1769 isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1770 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1771 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1772
1773 DBG(0, "%s: stopping schedules ...\n", __func__);
1774 limit = 2000;
1775 while (limit > 0) {
1776 udelay(250);
1777 limit -= 250;
1778 if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1779 break;
1780 }
1781 mdelay(7);
1782 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1783 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1784 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1785 }
1786 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1787 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1788 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1789 }
1790 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1791 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1792 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1793 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1794 }
1795 DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1796 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1797 isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1798 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1799
1800
1801 isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1802 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1803 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1804 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1805
1806#if 1
1807 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1808 if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1809 pr_err("%s: controller won't suspend %08x\n", __func__,
1810 isp1362_hcd->hc_control);
1811 status = -EBUSY;
1812 } else
1813#endif
1814 {
1815
1816 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1817 }
1818done:
1819 if (status == 0) {
1820 hcd->state = HC_STATE_SUSPENDED;
1821 DBG(0, "%s: HCD suspended: %08x\n", __func__,
1822 isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1823 }
1824 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1825 return status;
1826}
1827
1828static int isp1362_bus_resume(struct usb_hcd *hcd)
1829{
1830 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1831 u32 port;
1832 unsigned long flags;
1833 int status = -EINPROGRESS;
1834
1835 if (time_before(jiffies, isp1362_hcd->next_statechange))
1836 msleep(5);
1837
1838 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1839 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1840 pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1841 if (hcd->state == HC_STATE_RESUMING) {
		pr_warn("%s: duplicate resume\n", __func__);
1843 status = 0;
1844 } else
1845 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1846 case OHCI_USB_SUSPEND:
1847 DBG(0, "%s: resume root hub\n", __func__);
1848 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1849 isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1850 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1851 break;
1852 case OHCI_USB_RESUME:
1853
1854 DBG(0, "%s: remote wakeup\n", __func__);
1855 break;
1856 case OHCI_USB_OPER:
1857 DBG(0, "%s: odd resume\n", __func__);
1858 status = 0;
1859 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1860 break;
1861 default:
1862 DBG(0, "%s: root hub hardware reset\n", __func__);
1863 status = -EBUSY;
1864 }
1865 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1866 if (status == -EBUSY) {
1867 DBG(0, "%s: Restarting HC\n", __func__);
1868 isp1362_hc_stop(hcd);
1869 return isp1362_hc_start(hcd);
1870 }
1871 if (status != -EINPROGRESS)
1872 return status;
1873 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1874 port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1875 while (port--) {
1876 u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1877
1878
1879 if (!(stat & RH_PS_PSS)) {
1880 DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1881 continue;
1882 }
1883 DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1884 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1885 }
1886 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1887
1888
1889 hcd->state = HC_STATE_RESUMING;
1890 mdelay(20 + 15);
1891
1892 isp1362_hcd->hc_control = OHCI_USB_OPER;
1893 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1894 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1895 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1896 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1897
1898 msleep(10);
1899
1900
1901 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1902
1903 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1904 hcd->state = HC_STATE_RUNNING;
1905 return 0;
1906}
1907#else
1908#define isp1362_bus_suspend NULL
1909#define isp1362_bus_resume NULL
1910#endif
1911
1912
1913
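/* seq_file helpers for debugfs output: decode interrupt masks and registers. */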
1914static void dump_irq(struct seq_file *s, char *label, u16 mask)
1915{
1916 seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1917 mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1918 mask & HCuPINT_SUSP ? " susp" : "",
1919 mask & HCuPINT_OPR ? " opr" : "",
1920 mask & HCuPINT_EOT ? " eot" : "",
1921 mask & HCuPINT_ATL ? " atl" : "",
1922 mask & HCuPINT_SOF ? " sof" : "");
1923}
1924
1925static void dump_int(struct seq_file *s, char *label, u32 mask)
1926{
1927 seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1928 mask & OHCI_INTR_MIE ? " MIE" : "",
1929 mask & OHCI_INTR_RHSC ? " rhsc" : "",
1930 mask & OHCI_INTR_FNO ? " fno" : "",
1931 mask & OHCI_INTR_UE ? " ue" : "",
1932 mask & OHCI_INTR_RD ? " rd" : "",
1933 mask & OHCI_INTR_SF ? " sof" : "",
1934 mask & OHCI_INTR_SO ? " so" : "");
1935}
1936
1937static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1938{
1939 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1940 mask & OHCI_CTRL_RWC ? " rwc" : "",
1941 mask & OHCI_CTRL_RWE ? " rwe" : "",
1942 ({
1943 char *hcfs;
1944 switch (mask & OHCI_CTRL_HCFS) {
1945 case OHCI_USB_OPER:
1946 hcfs = " oper";
1947 break;
1948 case OHCI_USB_RESET:
1949 hcfs = " reset";
1950 break;
1951 case OHCI_USB_RESUME:
1952 hcfs = " resume";
1953 break;
1954 case OHCI_USB_SUSPEND:
1955 hcfs = " suspend";
1956 break;
1957 default:
1958 hcfs = " ?";
1959 }
1960 hcfs;
1961 }));
1962}
1963
1964static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1965{
1966 seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1967 isp1362_read_reg32(isp1362_hcd, HCREVISION));
1968 seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1969 isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1970 seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1971 isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1972 seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1973 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1974 seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1975 isp1362_read_reg32(isp1362_hcd, HCINTENB));
1976 seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1977 isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1978 seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1979 isp1362_read_reg32(isp1362_hcd, HCFMREM));
1980 seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
1981 isp1362_read_reg32(isp1362_hcd, HCFMNUM));
1982 seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
1983 isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
1984 seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
1985 isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
1986 seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
1987 isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
1988 seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
1989 isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
1990 seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
1991 isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
1992 seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
1993 isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
1994 seq_printf(s, "\n");
1995 seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
1996 isp1362_read_reg16(isp1362_hcd, HCHWCFG));
1997 seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
1998 isp1362_read_reg16(isp1362_hcd, HCDMACFG));
1999 seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2000 isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2001 seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2002 isp1362_read_reg16(isp1362_hcd, HCuPINT));
2003 seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2004 isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2005 seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2006 isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2007 seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2008 isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2009 seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2010 isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2011 seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2012 isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2013#if 0
2014 seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
2015 isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2016#endif
2017 seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2018 isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2019 seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2020 isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2021 seq_printf(s, "\n");
2022 seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2023 isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2024 seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2025 isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2026 seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2027 isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2028 seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2029 isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2030 seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2031 isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2032 seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2033 isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2034 seq_printf(s, "\n");
2035 seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2036 isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2037 seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2038 isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2039#if 0
2040 seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2041 isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2042#endif
2043 seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2044 isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2045 seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2046 isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2047 seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2048 isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2049 seq_printf(s, "\n");
2050 seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2051 isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2052 seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2053 isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2054}
2055
2056static int isp1362_show(struct seq_file *s, void *unused)
2057{
2058 struct isp1362_hcd *isp1362_hcd = s->private;
2059 struct isp1362_ep *ep;
2060 int i;
2061
2062 seq_printf(s, "%s\n%s version %s\n",
2063 isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2064
2065
2066
2067
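	/* buffer alignment statistics collected by the low-level chip buffer accessors */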
2068 seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2069 isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2070 isp1362_hcd->stat2, isp1362_hcd->stat1);
2071 seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2072 seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2073 seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0].stat_maxptds,
		       isp1362_hcd->istl_queue[1].stat_maxptds));
2076
2077
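	/* take the HCD lock: the register dumps below touch the chip and must not race the IRQ handler */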
2078 spin_lock_irq(&isp1362_hcd->lock);
2079
2080 dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2081 dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2082 dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2083 dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2084 dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2085
2086 for (i = 0; i < NUM_ISP1362_IRQS; i++)
2087 if (isp1362_hcd->irq_stat[i])
2088 seq_printf(s, "%-15s: %d\n",
2089 ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2090
2091 dump_regs(s, isp1362_hcd);
2092 list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2093 struct urb *urb;
2094
		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   /* renamed from 's' to avoid shadowing the seq_file argument */
				   char *pid;
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   pid = "in";
					   break;
				   case USB_PID_OUT:
					   pid = "out";
					   break;
				   case USB_PID_SETUP:
					   pid = "setup";
					   break;
				   case USB_PID_ACK:
					   pid = "status";
					   break;
				   default:
					   pid = "?";
					   break;
				   }
				   pid;
			   }), ep->maxpacket);
2116 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2117 seq_printf(s, " urb%p, %d/%d\n", urb,
2118 urb->actual_length,
2119 urb->transfer_buffer_length);
2120 }
2121 }
2122 if (!list_empty(&isp1362_hcd->async))
2123 seq_printf(s, "\n");
2124 dump_ptd_queue(&isp1362_hcd->atl_queue);
2125
2126 seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2127
2128 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2129 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2130 isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2131
2132 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2133 ep->interval, ep,
2134 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2135 ep->udev->devnum, ep->epnum,
2136 (ep->epnum == 0) ? "" :
2137 ((ep->nextpid == USB_PID_IN) ?
2138 "in" : "out"), ep->maxpacket);
2139 }
2140 dump_ptd_queue(&isp1362_hcd->intl_queue);
2141
2142 seq_printf(s, "ISO:\n");
2143
2144 list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2145 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2146 ep->interval, ep,
2147 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2148 ep->udev->devnum, ep->epnum,
2149 (ep->epnum == 0) ? "" :
2150 ((ep->nextpid == USB_PID_IN) ?
2151 "in" : "out"), ep->maxpacket);
2152 }
2153
2154 spin_unlock_irq(&isp1362_hcd->lock);
2155 seq_printf(s, "\n");
2156
2157 return 0;
2158}
2159
static int isp1362_open(struct inode *inode, struct file *file)
{
	/* the isp1362_hcd pointer was stored in i_private by debugfs_create_file() */
	return single_open(file, isp1362_show, inode->i_private);
}
2164
2165static const struct file_operations debug_ops = {
2166 .open = isp1362_open,
2167 .read = seq_read,
2168 .llseek = seq_lseek,
2169 .release = single_release,
2170};
2171
2172
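/* only one controller instance per system is expected, so a fixed debugfs file name is used */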
2173static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2174{
2175 isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
2176 usb_debug_root,
2177 isp1362_hcd, &debug_ops);
2178}
2179
2180static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2181{
2182 debugfs_remove(isp1362_hcd->debug_file);
2183}
2184
2185
2186
2187static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2188{
2189 int tmp = 20;
2190
2191 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2192 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2193 while (--tmp) {
2194 mdelay(1);
2195 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2196 break;
2197 }
2198 if (!tmp)
2199 pr_err("Software reset timeout\n");
2200}
2201
2202static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2203{
2204 unsigned long flags;
2205
2206 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2207 __isp1362_sw_reset(isp1362_hcd);
2208 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2209}
2210
2211static int isp1362_mem_config(struct usb_hcd *hcd)
2212{
2213 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2214 unsigned long flags;
2215 u32 total;
2216 u16 istl_size = ISP1362_ISTL_BUFSIZE;
2217 u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2218 u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2219 u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2220 u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2221 u16 atl_size;
2222 int i;
2223
2224 WARN_ON(istl_size & 3);
2225 WARN_ON(atl_blksize & 3);
2226 WARN_ON(intl_blksize & 3);
2227 WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2228 WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2229
2230 BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2231 if (atl_buffers > 32)
2232 atl_buffers = 32;
2233 atl_size = atl_buffers * atl_blksize;
2234 total = atl_size + intl_size + istl_size;
2235 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2236 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2237 istl_size / 2, istl_size, 0, istl_size / 2);
2238 dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2239 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2240 intl_size, istl_size);
2241 dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2242 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2243 atl_size, istl_size + intl_size);
2244 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
2245 ISP1362_BUF_SIZE - total);
2246
2247 if (total > ISP1362_BUF_SIZE) {
2248 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2249 __func__, total, ISP1362_BUF_SIZE);
2250 return -ENOMEM;
2251 }
2252
2253 total = istl_size + intl_size + atl_size;
2254 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2255
2256 for (i = 0; i < 2; i++) {
		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2258 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2259 isp1362_hcd->istl_queue[i].blk_size = 4;
2260 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2261 snprintf(isp1362_hcd->istl_queue[i].name,
2262 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2263 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2264 isp1362_hcd->istl_queue[i].name,
2265 isp1362_hcd->istl_queue[i].buf_start,
2266 isp1362_hcd->istl_queue[i].buf_size);
2267 }
2268 isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2269
2270 isp1362_hcd->intl_queue.buf_start = istl_size;
2271 isp1362_hcd->intl_queue.buf_size = intl_size;
2272 isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2273 isp1362_hcd->intl_queue.blk_size = intl_blksize;
2274 isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2275 isp1362_hcd->intl_queue.skip_map = ~0;
2276 INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2277
2278 isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2279 isp1362_hcd->intl_queue.buf_size);
2280 isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2281 isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2282 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2283 isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2284 1 << (ISP1362_INTL_BUFFERS - 1));
2285
2286 isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2287 isp1362_hcd->atl_queue.buf_size = atl_size;
2288 isp1362_hcd->atl_queue.buf_count = atl_buffers;
2289 isp1362_hcd->atl_queue.blk_size = atl_blksize;
2290 isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2291 isp1362_hcd->atl_queue.skip_map = ~0;
2292 INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2293
2294 isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2295 isp1362_hcd->atl_queue.buf_size);
2296 isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2297 isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2298 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2299 isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2300 1 << (atl_buffers - 1));
2301
2302 snprintf(isp1362_hcd->atl_queue.name,
2303 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2304 snprintf(isp1362_hcd->intl_queue.name,
2305 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2306 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2307 isp1362_hcd->intl_queue.name,
2308 isp1362_hcd->intl_queue.buf_start,
2309 ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2310 isp1362_hcd->intl_queue.buf_size);
2311 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2312 isp1362_hcd->atl_queue.name,
2313 isp1362_hcd->atl_queue.buf_start,
2314 atl_buffers, isp1362_hcd->atl_queue.blk_size,
2315 isp1362_hcd->atl_queue.buf_size);
2316
2317 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2318
2319 return 0;
2320}
2321
2322static int isp1362_hc_reset(struct usb_hcd *hcd)
2323{
2324 int ret = 0;
2325 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2326 unsigned long t;
2327 unsigned long timeout = 100;
2328 unsigned long flags;
2329 int clkrdy = 0;
2330
2331 pr_debug("%s:\n", __func__);
2332
2333 if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2334 isp1362_hcd->board->reset(hcd->self.controller, 1);
2335 msleep(20);
2336 if (isp1362_hcd->board->clock)
2337 isp1362_hcd->board->clock(hcd->self.controller, 1);
2338 isp1362_hcd->board->reset(hcd->self.controller, 0);
2339 } else
2340 isp1362_sw_reset(isp1362_hcd);
2341
2342
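	/* the chip has just been reset; wait for its clock to report ready (CLKRDY) */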
2343 t = jiffies + msecs_to_jiffies(timeout);
2344 while (!clkrdy && time_before_eq(jiffies, t)) {
2345 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2346 clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2347 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2348 if (!clkrdy)
2349 msleep(4);
2350 }
2351
2352 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2353 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2354 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2355 if (!clkrdy) {
2356 pr_err("Clock not ready after %lums\n", timeout);
2357 ret = -ENODEV;
2358 }
2359 return ret;
2360}
2361
2362static void isp1362_hc_stop(struct usb_hcd *hcd)
2363{
2364 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2365 unsigned long flags;
2366 u32 tmp;
2367
2368 pr_debug("%s:\n", __func__);
2369
2370 del_timer_sync(&hcd->rh_timer);
2371
2372 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2373
2374 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2375
2376
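	/* switch off power to all root hub ports */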
2377 tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2378 tmp &= ~(RH_A_NPS | RH_A_PSM);
2379 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2380 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2381
2382
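	/* put the chip back into reset and stop its clock, if the board provides the hooks */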
2383 if (isp1362_hcd->board && isp1362_hcd->board->reset)
2384 isp1362_hcd->board->reset(hcd->self.controller, 1);
2385 else
2386 __isp1362_sw_reset(isp1362_hcd);
2387
2388 if (isp1362_hcd->board && isp1362_hcd->board->clock)
2389 isp1362_hcd->board->clock(hcd->self.controller, 0);
2390
2391 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2392}
2393
2394#ifdef CHIP_BUFFER_TEST
2395static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2396{
2397 int ret = 0;
2398 u16 *ref;
2399 unsigned long flags;
2400
2401 ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2402 if (ref) {
2403 int offset;
2404 u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2405
2406 for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2407 ref[offset] = ~offset;
2408 tst[offset] = offset;
2409 }
2410
2411 for (offset = 0; offset < 4; offset++) {
2412 int j;
2413
2414 for (j = 0; j < 8; j++) {
2415 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2416 isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2417 isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2418 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2419
2420 if (memcmp(ref, tst, j)) {
2421 ret = -ENODEV;
				pr_err("%s: memory check with %d byte transfer at offset %d failed\n",
				       __func__, j, offset);
2424 dump_data((u8 *)ref + offset, j);
2425 dump_data((u8 *)tst + offset, j);
2426 }
2427 }
2428 }
2429
2430 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2431 isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2432 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2433 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2434
2435 if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2436 ret = -ENODEV;
2437 pr_err("%s: memory check failed\n", __func__);
2438 dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2439 }
2440
2441 for (offset = 0; offset < 256; offset++) {
2442 int test_size = 0;
2443
2444 yield();
2445
2446 memset(tst, 0, ISP1362_BUF_SIZE);
2447 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2448 isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2449 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2450 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
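			/* the buffer was written as all zeroes, so both halves must read back identical */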
2451 if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2452 ISP1362_BUF_SIZE / 2)) {
2453 pr_err("%s: Failed to clear buffer\n", __func__);
2454 dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2455 break;
2456 }
2457 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2458 isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2459 isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2460 offset * 2 + PTD_HEADER_SIZE, test_size);
2461 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2462 PTD_HEADER_SIZE + test_size);
2463 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2464 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2465 dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2466 dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2467 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2468 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2469 PTD_HEADER_SIZE + test_size);
2470 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2471 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2472 ret = -ENODEV;
2473 pr_err("%s: memory check with offset %02x failed\n",
2474 __func__, offset);
2475 break;
2476 }
2477 pr_warning("%s: memory check with offset %02x ok after second read\n",
2478 __func__, offset);
2479 }
2480 }
2481 kfree(ref);
2482 }
2483 return ret;
2484}
2485#endif
2486
2487static int isp1362_hc_start(struct usb_hcd *hcd)
2488{
2489 int ret;
2490 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2491 struct isp1362_platform_data *board = isp1362_hcd->board;
2492 u16 hwcfg;
2493 u16 chipid;
2494 unsigned long flags;
2495
2496 pr_debug("%s:\n", __func__);
2497
2498 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2499 chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2500 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2501
2502 if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2503 pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2504 return -ENODEV;
2505 }
2506
2507#ifdef CHIP_BUFFER_TEST
2508 ret = isp1362_chip_test(isp1362_hcd);
2509 if (ret)
2510 return -ENODEV;
2511#endif
2512 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2513
2514 isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2515 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2516
2517
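	/* assemble the hardware configuration from the board-specific platform data */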
2518 hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2519 if (board->sel15Kres)
2520 hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2521 ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2522 if (board->clknotstop)
2523 hwcfg |= HCHWCFG_CLKNOTSTOP;
2524 if (board->oc_enable)
2525 hwcfg |= HCHWCFG_ANALOG_OC;
2526 if (board->int_act_high)
2527 hwcfg |= HCHWCFG_INT_POL;
2528 if (board->int_edge_triggered)
2529 hwcfg |= HCHWCFG_INT_TRIGGER;
2530 if (board->dreq_act_high)
2531 hwcfg |= HCHWCFG_DREQ_POL;
2532 if (board->dack_act_high)
2533 hwcfg |= HCHWCFG_DACK_POL;
2534 isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2535 isp1362_show_reg(isp1362_hcd, HCHWCFG);
2536 isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2537 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2538
2539 ret = isp1362_mem_config(hcd);
2540 if (ret)
2541 return ret;
2542
2543 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2544
2545
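	/* root hub configuration: power switching, overcurrent mode and power-on delay from board data */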
2546 isp1362_hcd->rhdesca = 0;
2547 if (board->no_power_switching)
2548 isp1362_hcd->rhdesca |= RH_A_NPS;
2549 if (board->power_switching_mode)
2550 isp1362_hcd->rhdesca |= RH_A_PSM;
2551 if (board->potpg)
2552 isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2553 else
2554 isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2555
2556 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2557 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2558 isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2559
2560 isp1362_hcd->rhdescb = RH_B_PPCM;
2561 isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2562 isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2563
2564 isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2565 isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2566 isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2567
2568 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2569
2570 isp1362_hcd->hc_control = OHCI_USB_OPER;
2571 hcd->state = HC_STATE_RUNNING;
2572
2573 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2574
2575 isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2576 isp1362_hcd->intenb |= OHCI_INTR_RD;
2577 isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2578 isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2579 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2580
2581
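	/* go operational and enable global port power */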
2582 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2583
2584 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2585
2586 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2587
2588 return 0;
2589}
2590
2591
2592
2593static struct hc_driver isp1362_hc_driver = {
2594 .description = hcd_name,
2595 .product_desc = "ISP1362 Host Controller",
2596 .hcd_priv_size = sizeof(struct isp1362_hcd),
2597
2598 .irq = isp1362_irq,
2599 .flags = HCD_USB11 | HCD_MEMORY,
2600
2601 .reset = isp1362_hc_reset,
2602 .start = isp1362_hc_start,
2603 .stop = isp1362_hc_stop,
2604
2605 .urb_enqueue = isp1362_urb_enqueue,
2606 .urb_dequeue = isp1362_urb_dequeue,
2607 .endpoint_disable = isp1362_endpoint_disable,
2608
2609 .get_frame_number = isp1362_get_frame,
2610
2611 .hub_status_data = isp1362_hub_status_data,
2612 .hub_control = isp1362_hub_control,
2613 .bus_suspend = isp1362_bus_suspend,
2614 .bus_resume = isp1362_bus_resume,
2615};
2616
2617
2618
2619static int isp1362_remove(struct platform_device *pdev)
2620{
2621 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2622 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2623
2624 remove_debug_file(isp1362_hcd);
2625 DBG(0, "%s: Removing HCD\n", __func__);
2626 usb_remove_hcd(hcd);
2627 DBG(0, "%s: put_hcd\n", __func__);
2628 usb_put_hcd(hcd);
2629 DBG(0, "%s: Done\n", __func__);
2630
2631 return 0;
2632}
2633
2634static int isp1362_probe(struct platform_device *pdev)
2635{
2636 struct usb_hcd *hcd;
2637 struct isp1362_hcd *isp1362_hcd;
2638 struct resource *addr, *data, *irq_res;
2639 void __iomem *addr_reg;
2640 void __iomem *data_reg;
2641 int irq;
2642 int retval = 0;
2643 unsigned int irq_flags = 0;
2644
2645 if (usb_disabled())
2646 return -ENODEV;
2647
2648
2649
2650
2651
2652
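	/*
	 * Basic sanity check: the board code is expected to supply two memory
	 * resources (data port at MEM index 0, address port at MEM index 1)
	 * and one interrupt resource. Roughly (illustrative sketch only;
	 * addresses and the IRQ symbol are made up):
	 *
	 *	static struct resource isp1362_resources[] = {
	 *		[0] = DEFINE_RES_MEM(0x08000000, 2),	// data port
	 *		[1] = DEFINE_RES_MEM(0x08000004, 2),	// address port
	 *		[2] = DEFINE_RES_IRQ(IRQ_BOARD_ISP1362),
	 *	};
	 */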
2653 if (pdev->num_resources < 3)
2654 return -ENODEV;
2655
2656 if (pdev->dev.dma_mask) {
2657 DBG(1, "won't do DMA");
2658 return -ENODEV;
2659 }
2660
2661 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2662 if (!irq_res)
2663 return -ENODEV;
2664
2665 irq = irq_res->start;
2666
2667 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2668 addr_reg = devm_ioremap_resource(&pdev->dev, addr);
2669 if (IS_ERR(addr_reg))
2670 return PTR_ERR(addr_reg);
2671
2672 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2673 data_reg = devm_ioremap_resource(&pdev->dev, data);
2674 if (IS_ERR(data_reg))
2675 return PTR_ERR(data_reg);
2676
2677
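	/* allocate the HCD together with the private isp1362_hcd state */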
2678 hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2679 if (!hcd)
2680 return -ENOMEM;
2681
2682 hcd->rsrc_start = data->start;
2683 isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2684 isp1362_hcd->data_reg = data_reg;
2685 isp1362_hcd->addr_reg = addr_reg;
2686
2687 isp1362_hcd->next_statechange = jiffies;
2688 spin_lock_init(&isp1362_hcd->lock);
2689 INIT_LIST_HEAD(&isp1362_hcd->async);
2690 INIT_LIST_HEAD(&isp1362_hcd->periodic);
2691 INIT_LIST_HEAD(&isp1362_hcd->isoc);
2692 INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2693 isp1362_hcd->board = dev_get_platdata(&pdev->dev);
2694#if USE_PLATFORM_DELAY
2695 if (!isp1362_hcd->board->delay) {
2696 dev_err(hcd->self.controller, "No platform delay function given\n");
2697 retval = -ENODEV;
2698 goto err;
2699 }
2700#endif
2701
2702 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2703 irq_flags |= IRQF_TRIGGER_RISING;
2704 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2705 irq_flags |= IRQF_TRIGGER_FALLING;
2706 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2707 irq_flags |= IRQF_TRIGGER_HIGH;
2708 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2709 irq_flags |= IRQF_TRIGGER_LOW;
2710
2711 retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
2712 if (retval != 0)
2713 goto err;
2714 device_wakeup_enable(hcd->self.controller);
2715
2716 dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);
2717
2718 create_debug_file(isp1362_hcd);
2719
2720 return 0;
2721
2722 err:
2723 usb_put_hcd(hcd);
2724
2725 return retval;
2726}
2727
2728#ifdef CONFIG_PM
2729static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2730{
2731 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2732 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2733 unsigned long flags;
2734 int retval = 0;
2735
2736 DBG(0, "%s: Suspending device\n", __func__);
2737
2738 if (state.event == PM_EVENT_FREEZE) {
2739 DBG(0, "%s: Suspending root hub\n", __func__);
2740 retval = isp1362_bus_suspend(hcd);
2741 } else {
2742 DBG(0, "%s: Suspending RH ports\n", __func__);
2743 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2744 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2745 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2746 }
2747 if (retval == 0)
2748 pdev->dev.power.power_state = state;
2749 return retval;
2750}
2751
2752static int isp1362_resume(struct platform_device *pdev)
2753{
2754 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2755 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2756 unsigned long flags;
2757
2758 DBG(0, "%s: Resuming\n", __func__);
2759
2760 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2761 DBG(0, "%s: Resume RH ports\n", __func__);
2762 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2763 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2764 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2765 return 0;
2766 }
2767
2768 pdev->dev.power.power_state = PMSG_ON;
2769
2770 return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2771}
2772#else
2773#define isp1362_suspend NULL
2774#define isp1362_resume NULL
2775#endif
2776
2777static struct platform_driver isp1362_driver = {
2778 .probe = isp1362_probe,
2779 .remove = isp1362_remove,
2780
2781 .suspend = isp1362_suspend,
2782 .resume = isp1362_resume,
2783 .driver = {
2784 .name = hcd_name,
2785 },
2786};
2787
2788module_platform_driver(isp1362_driver);
2789