1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#ifdef CONFIG_USB_DEBUG
41# define ISP1362_DEBUG
42#else
43# undef ISP1362_DEBUG
44#endif
45
46
47
48
49
50
51
52
53
54#undef BUGGY_PXA2XX_UDC_USBTEST
55
56#undef PTD_TRACE
57#undef URB_TRACE
58#undef VERBOSE
59#undef REGISTERS
60
61
62
63
64#undef CHIP_BUFFER_TEST
65
66#include <linux/module.h>
67#include <linux/moduleparam.h>
68#include <linux/kernel.h>
69#include <linux/delay.h>
70#include <linux/ioport.h>
71#include <linux/sched.h>
72#include <linux/slab.h>
73#include <linux/errno.h>
74#include <linux/init.h>
75#include <linux/list.h>
76#include <linux/interrupt.h>
77#include <linux/usb.h>
78#include <linux/usb/isp1362.h>
79#include <linux/usb/hcd.h>
80#include <linux/platform_device.h>
81#include <linux/pm.h>
82#include <linux/io.h>
83#include <linux/bitmap.h>
84#include <linux/prefetch.h>
85
86#include <asm/irq.h>
87#include <asm/byteorder.h>
88#include <asm/unaligned.h>
89
90static int dbg_level;
91#ifdef ISP1362_DEBUG
92module_param(dbg_level, int, 0644);
93#else
94module_param(dbg_level, int, 0);
95#define STUB_DEBUG_FILE
96#endif
97
98#include "../core/usb.h"
99#include "isp1362.h"
100
101
102#define DRIVER_VERSION "2005-04-04"
103#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
104
105MODULE_DESCRIPTION(DRIVER_DESC);
106MODULE_LICENSE("GPL");
107
108static const char hcd_name[] = "isp1362-hcd";
109
110static void isp1362_hc_stop(struct usb_hcd *hcd);
111static int isp1362_hc_start(struct usb_hcd *hcd);
112
113
114
115
116
117
118
119
120
121
/*
 * Enable the microprocessor interrupt sources given in @mask.
 *
 * Sources that are about to be newly enabled are first acknowledged in
 * HCuPINT (presumably write-to-clear — TODO confirm with the ISP1362
 * datasheet) so a stale pending bit does not fire as soon as it is
 * unmasked.  While the IRQ handler is running (irq_active set) only the
 * software copy irqenb is updated; the handler is expected to write
 * HCuPINTENB itself when it finishes.
 */
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	/* Nothing to do if every requested source is already enabled */
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	/* Acknowledge the sources that are about to be newly enabled */
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	/* Defer the hardware update while the IRQ handler is active */
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}
133
134
135
136static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
137 u16 offset)
138{
139 struct isp1362_ep_queue *epq = NULL;
140
141 if (offset < isp1362_hcd->istl_queue[1].buf_start)
142 epq = &isp1362_hcd->istl_queue[0];
143 else if (offset < isp1362_hcd->intl_queue.buf_start)
144 epq = &isp1362_hcd->istl_queue[1];
145 else if (offset < isp1362_hcd->atl_queue.buf_start)
146 epq = &isp1362_hcd->intl_queue;
147 else if (offset < isp1362_hcd->atl_queue.buf_start +
148 isp1362_hcd->atl_queue.buf_size)
149 epq = &isp1362_hcd->atl_queue;
150
151 if (epq)
152 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
153 else
154 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
155
156 return epq;
157}
158
159static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
160{
161 int offset;
162
163 if (index * epq->blk_size > epq->buf_size) {
164 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
165 epq->buf_size / epq->blk_size);
166 return -EINVAL;
167 }
168 offset = epq->buf_start + index * epq->blk_size;
169 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
170
171 return offset;
172}
173
174
175
176static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
177 int mps)
178{
179 u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
180
181 xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
182 if (xfer_size < size && xfer_size % mps)
183 xfer_size -= xfer_size % mps;
184
185 return xfer_size;
186}
187
/*
 * Claim a contiguous run of PTD buffer blocks in @epq large enough for
 * @len payload bytes plus the PTD header, on behalf of endpoint @ep.
 *
 * On success the run is marked in epq->buf_map, ep->ptd_offset /
 * ep->ptd_index / ep->num_ptds are set, and the first block index is
 * returned.  Returns -ENOMEM when no blocks are free at all, or
 * -EOVERFLOW when no sufficiently long contiguous run exists.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* blocks needed for header + payload, rounded up */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* An endpoint must never hold two claims at the same time */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		       epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
					   num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	/* Record the claim on the endpoint and in the queue accounting */
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
226
/*
 * Return the PTD buffer blocks held by @ep to @epq.
 *
 * Clears the blocks in buf_map, sets their skip bits so the controller
 * ignores the now-stale PTDs, updates the queue accounting, and resets
 * the endpoint's claim state.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		       __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		       ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		       epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* The endpoint no longer owns any chip buffer space */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
256
257
258
259
260
261
/*
 * Fill in @ep->ptd for the next chunk of @urb according to ep->nextpid.
 *
 * Determines transfer direction, data toggle, chunk length and data
 * pointer for the pending PID stage, then encodes them into the PTD
 * header fields.  For isochronous transfers @fno selects the frame
 * descriptor and start frame.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* Default: continue where the previous chunk left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			/* NOTE(review): uses iso_frame_desc[0], not [fno], and
			 * keeps the default ep->data — verify for ISO OUT */
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
				urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP stage always sends the 8-byte request with DATA0 */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* Status stage: zero-length packet in the opposite direction
		 * of the data stage, always DATA1 */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	/* Encode the chosen parameters into the PTD header fields */
	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
346
347static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
348 struct isp1362_ep_queue *epq)
349{
350 struct ptd *ptd = &ep->ptd;
351 int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
352
353 _BUG_ON(ep->ptd_offset < 0);
354
355 prefetch(ptd);
356 isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
357 if (len)
358 isp1362_write_buffer(isp1362_hcd, ep->data,
359 ep->ptd_offset + PTD_HEADER_SIZE, len);
360
361 dump_ptd(ptd);
362 dump_ptd_out_data(ptd, ep->data);
363}
364
/*
 * Read back a completed PTD header (and its IN payload, if any) from
 * the chip into @ep, removing @ep from the queue's active list.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* Only IN transfers that actually moved data have payload to fetch */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
		       ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);

	/* Fetch the received payload that follows the PTD header */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
396
397
398
399
400
401
/*
 * Take the PTD belonging to @ep out of service.
 *
 * The endpoint is put on remove_list and the actual cleanup is deferred
 * to the next SOF interrupt (enabled here, handled by finish_unlinks())
 * so the controller is done with the PTD first.  For indexed queues the
 * PTD's skip bit is set immediately to stop further processing.
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* Defer buffer release and URB completion to the SOF handler */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);

	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* presumably an ISO PTD without a slot index — no skip bit
		 * to set; TODO confirm */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* Tell the controller to skip this PTD from now on */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* Deactivate the buffer once every PTD is skipped */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
444
445
446
447
448
/*
 * Complete @urb on @ep with @status and give it back to usbcore.
 *
 * Must be called with isp1362_hcd->lock held; the lock is dropped
 * around usb_hcd_giveback_urb() (whose completion callback may re-enter
 * the HCD) and then re-acquired, as annotated below.  When the endpoint
 * has no more queued URBs it is descheduled, and a periodic endpoint
 * returns its reserved branch bandwidth.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* A control endpoint restarts with a SETUP stage for the next URB */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
		usb_pipeint(urb->pipe) ? "int" :
		usb_pipebulk(urb->pipe) ? "bulk" :
		"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);

	/* Give the URB back without holding the HCD lock */
	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* More URBs queued: keep the endpoint scheduled */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* Async endpoint: descheduling is a plain list removal */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}

	/* Periodic endpoint: give back its reserved branch bandwidth */
	if (ep->interval) {
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
499
500
501
502
/*
 * Interpret the completion status of the PTD just read back for @ep.
 *
 * Updates urb->actual_length and the endpoint's data toggle, advances a
 * control transfer through its SETUP/DATA/ACK stages via ep->nextpid,
 * maps the chip's condition code to a URB status, and hands the URB to
 * finish_request() once it is complete or has failed.
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;	/* -EINPROGRESS: URB not finished yet */
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	/* Chip never processed the PTD: treat as device-not-responding */
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		       ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/*
	 * Data underrun (short packet) is only an error when the URB set
	 * URB_SHORT_NOT_OK; otherwise the short transfer simply ends the
	 * data stage early.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			/* Account for the partial data that did transfer */
			urb->actual_length += PTD_GET_COUNT(ptd);
			if (usb_pipecontrol(urb->pipe)) {
				/* Proceed to the status stage but record the
				 * error in the URB */
				ep->nextpid = USB_PID_ACK;
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	/* Other errors fail the URB after 3 attempts, or immediately for
	 * STALL and data overrun */
	if (cc != PTD_CC_NOERROR) {
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			       PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* Not done yet if the last packet was
				 * full-size and URB_ZERO_PACKET asks for a
				 * trailing ZLP */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* Done when the buffer is full or a short packet arrives */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* Pick the next stage: no data -> status, otherwise a data
		 * stage, which always starts with DATA1 */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		/* Status stage done: the control transfer is complete */
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
656
/*
 * Run from the SOF interrupt: clean up every endpoint that remove_ptd()
 * queued on remove_list.  Releases any still-claimed PTD buffers,
 * finishes pending URBs with -ESHUTDOWN, and drops the endpoint from
 * the active and remove lists.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		/* index < 0 means no buffer blocks are claimed */
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
690
691static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
692{
693 if (count > 0) {
694 if (count < isp1362_hcd->atl_queue.ptd_count)
695 isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
696 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
697 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
698 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
699 } else
700 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
701}
702
/*
 * Arm the INTL (interrupt transfer) buffer: unmask its done interrupt,
 * mark the buffer region active and program the current skip map so the
 * controller only processes unskipped PTDs.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
709
/*
 * Mark one ISO transfer buffer — ISTL1 if @flip is set, else ISTL0 — as
 * full so the controller starts processing it, and unmask the matching
 * done interrupt.
 */
static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
{
	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
}
716
717static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
718 struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
719{
720 int index = epq->free_ptd;
721
722 prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
723 index = claim_ptd_buffers(epq, ep, ep->length);
724 if (index == -ENOMEM) {
725 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
726 ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
727 return index;
728 } else if (index == -EOVERFLOW) {
729 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
730 __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
731 epq->buf_map, epq->skip_map);
732 return index;
733 } else
734 BUG_ON(index < 0);
735 list_add_tail(&ep->active, &epq->active);
736 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
737 ep, ep->num_req, ep->length, &epq->active);
738 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
739 ep->ptd_offset, ep, ep->num_req);
740 isp1362_write_ptd(isp1362_hcd, ep, epq);
741 __clear_bit(ep->ptd_index, &epq->skip_map);
742
743 return 0;
744}
745
/*
 * Submit PTDs for every idle endpoint on the async (control/bulk)
 * schedule, then arm the ATL buffer.
 *
 * -ENOMEM (no free PTD slot) aborts the scan; -EOVERFLOW (free slots
 * too small) skips just that endpoint; both set @defer so submission is
 * retried from the next SOF.  The schedule head is rotated afterwards
 * to round-robin between endpoints.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* Don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Rotate the list head so no endpoint is starved */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
798
/*
 * Submit PTDs for every idle endpoint on the periodic (interrupt)
 * schedule, then arm the INTL buffer.  -ENOMEM aborts the scan,
 * -EOVERFLOW skips the endpoint; unlike the async path there is no
 * explicit defer/retry flag here.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* Don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		static int last_count;

		/* Only log when the submitted count changes */
		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
844
845static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
846{
847 u16 ptd_offset = ep->ptd_offset;
848 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
849
850 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
851 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
852
853 ptd_offset += num_ptds * epq->blk_size;
854 if (ptd_offset < epq->buf_start + epq->buf_size)
855 return ptd_offset;
856 else
857 return -ENOMEM;
858}
859
860static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
861{
862 int ptd_count = 0;
863 int flip = isp1362_hcd->istl_flip;
864 struct isp1362_ep_queue *epq;
865 int ptd_offset;
866 struct isp1362_ep *ep;
867 struct isp1362_ep *tmp;
868 u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
869
870 fill2:
871 epq = &isp1362_hcd->istl_queue[flip];
872 if (atomic_read(&epq->finishing)) {
873 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
874 return;
875 }
876
877 if (!list_empty(&epq->active))
878 return;
879
880 ptd_offset = epq->buf_start;
881 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
882 struct urb *urb = get_urb(ep);
883 s16 diff = fno - (u16)urb->start_frame;
884
885 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
886
887 if (diff > urb->number_of_packets) {
888
889 finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
890 continue;
891 } else if (diff < -1) {
892
893
894
895
896
897 } else if (diff == -1) {
898
899 prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
900 if (ptd_offset + PTD_HEADER_SIZE + ep->length >
901 epq->buf_start + epq->buf_size) {
902 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
903 __func__, ep->length);
904 continue;
905 }
906 ep->ptd_offset = ptd_offset;
907 list_add_tail(&ep->active, &epq->active);
908
909 ptd_offset = next_ptd(epq, ep);
910 if (ptd_offset < 0) {
911 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
912 ep->num_req, epq->name);
913 break;
914 }
915 }
916 }
917 list_for_each_entry(ep, &epq->active, active) {
918 if (epq->active.next == &ep->active)
919 ep->ptd.mps |= PTD_LAST_MSK;
920 isp1362_write_ptd(isp1362_hcd, ep, epq);
921 ptd_count++;
922 }
923
924 if (ptd_count)
925 enable_istl_transfers(isp1362_hcd, flip);
926
927 epq->ptd_count += ptd_count;
928 if (epq->ptd_count > epq->stat_maxptds)
929 epq->stat_maxptds = epq->ptd_count;
930
931
932 if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
933 (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
934 fno++;
935 ptd_count = 0;
936 flip = 1 - flip;
937 goto fill2;
938 }
939}
940
941static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
942 struct isp1362_ep_queue *epq)
943{
944 struct isp1362_ep *ep;
945 struct isp1362_ep *tmp;
946
947 if (list_empty(&epq->active)) {
948 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
949 return;
950 }
951
952 DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
953
954 atomic_inc(&epq->finishing);
955 list_for_each_entry_safe(ep, tmp, &epq->active, active) {
956 int index = ep->ptd_index;
957
958 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
959 index, ep->ptd_offset);
960
961 BUG_ON(index < 0);
962 if (__test_and_clear_bit(index, &done_map)) {
963 isp1362_read_ptd(isp1362_hcd, ep, epq);
964 epq->free_ptd = index;
965 BUG_ON(ep->num_ptds == 0);
966 release_ptd_buffers(epq, ep);
967
968 DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
969 ep, ep->num_req);
970 if (!list_empty(&ep->remove_list)) {
971 list_del_init(&ep->remove_list);
972 DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
973 }
974 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
975 ep, ep->num_req);
976 postproc_ep(isp1362_hcd, ep);
977 }
978 if (!done_map)
979 break;
980 }
981 if (done_map)
982 pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
983 epq->skip_map);
984 atomic_dec(&epq->finishing);
985}
986
/*
 * Read back and post-process every active PTD of ISO queue @epq.
 * Unlike finish_transfers() there is no per-PTD done_map here — the
 * whole buffer is drained when its ISTL interrupt fires.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* ISO queues are apparently expected to use blk_size 0 — TODO confirm */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1010
1011static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1012{
1013 int handled = 0;
1014 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1015 u16 irqstat;
1016 u16 svc_mask;
1017
1018 spin_lock(&isp1362_hcd->lock);
1019
1020 BUG_ON(isp1362_hcd->irq_active++);
1021
1022 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1023
1024 irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1025 DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1026
1027
1028 irqstat &= isp1362_hcd->irqenb;
1029 isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1030 svc_mask = irqstat;
1031
1032 if (irqstat & HCuPINT_SOF) {
1033 isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1034 isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1035 handled = 1;
1036 svc_mask &= ~HCuPINT_SOF;
1037 DBG(3, "%s: SOF\n", __func__);
1038 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1039 if (!list_empty(&isp1362_hcd->remove_list))
1040 finish_unlinks(isp1362_hcd);
1041 if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1042 if (list_empty(&isp1362_hcd->atl_queue.active)) {
1043 start_atl_transfers(isp1362_hcd);
1044 } else {
1045 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1046 isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1047 isp1362_hcd->atl_queue.skip_map);
1048 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1049 }
1050 }
1051 }
1052
1053 if (irqstat & HCuPINT_ISTL0) {
1054 isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1055 handled = 1;
1056 svc_mask &= ~HCuPINT_ISTL0;
1057 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1058 DBG(1, "%s: ISTL0\n", __func__);
1059 WARN_ON((int)!!isp1362_hcd->istl_flip);
1060 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1061 HCBUFSTAT_ISTL0_ACTIVE);
1062 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1063 HCBUFSTAT_ISTL0_DONE));
1064 isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1065 }
1066
1067 if (irqstat & HCuPINT_ISTL1) {
1068 isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1069 handled = 1;
1070 svc_mask &= ~HCuPINT_ISTL1;
1071 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1072 DBG(1, "%s: ISTL1\n", __func__);
1073 WARN_ON(!(int)isp1362_hcd->istl_flip);
1074 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1075 HCBUFSTAT_ISTL1_ACTIVE);
1076 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1077 HCBUFSTAT_ISTL1_DONE));
1078 isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1079 }
1080
1081 if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1082 WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1083 (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1084 finish_iso_transfers(isp1362_hcd,
1085 &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1086 start_iso_transfers(isp1362_hcd);
1087 isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1088 }
1089
1090 if (irqstat & HCuPINT_INTL) {
1091 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1092 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1093 isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1094
1095 DBG(2, "%s: INTL\n", __func__);
1096
1097 svc_mask &= ~HCuPINT_INTL;
1098
1099 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1100 if (~(done_map | skip_map) == 0)
1101
1102 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1103
1104 handled = 1;
1105 WARN_ON(!done_map);
1106 if (done_map) {
1107 DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1108 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1109 start_intl_transfers(isp1362_hcd);
1110 }
1111 }
1112
1113 if (irqstat & HCuPINT_ATL) {
1114 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1115 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1116 isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1117
1118 DBG(2, "%s: ATL\n", __func__);
1119
1120 svc_mask &= ~HCuPINT_ATL;
1121
1122 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1123 if (~(done_map | skip_map) == 0)
1124 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1125 if (done_map) {
1126 DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1127 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1128 start_atl_transfers(isp1362_hcd);
1129 }
1130 handled = 1;
1131 }
1132
1133 if (irqstat & HCuPINT_OPR) {
1134 u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1135 isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1136
1137 svc_mask &= ~HCuPINT_OPR;
1138 DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1139 intstat &= isp1362_hcd->intenb;
1140 if (intstat & OHCI_INTR_UE) {
1141 pr_err("Unrecoverable error\n");
1142
1143 }
1144 if (intstat & OHCI_INTR_RHSC) {
1145 isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1146 isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1147 isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1148 }
1149 if (intstat & OHCI_INTR_RD) {
1150 pr_info("%s: RESUME DETECTED\n", __func__);
1151 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1152 usb_hcd_resume_root_hub(hcd);
1153 }
1154 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1155 irqstat &= ~HCuPINT_OPR;
1156 handled = 1;
1157 }
1158
1159 if (irqstat & HCuPINT_SUSP) {
1160 isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1161 handled = 1;
1162 svc_mask &= ~HCuPINT_SUSP;
1163
1164 pr_info("%s: SUSPEND IRQ\n", __func__);
1165 }
1166
1167 if (irqstat & HCuPINT_CLKRDY) {
1168 isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1169 handled = 1;
1170 isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1171 svc_mask &= ~HCuPINT_CLKRDY;
1172 pr_info("%s: CLKRDY IRQ\n", __func__);
1173 }
1174
1175 if (svc_mask)
1176 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1177
1178 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1179 isp1362_hcd->irq_active--;
1180 spin_unlock(&isp1362_hcd->lock);
1181
1182 return IRQ_RETVAL(handled);
1183}
1184
1185
1186
1187#define MAX_PERIODIC_LOAD 900
1188static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1189{
1190 int i, branch = -ENOSPC;
1191
1192
1193
1194
1195 for (i = 0; i < interval; i++) {
1196 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1197 int j;
1198
1199 for (j = i; j < PERIODIC_SIZE; j += interval) {
1200 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1201 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1202 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1203 break;
1204 }
1205 }
1206 if (j < PERIODIC_SIZE)
1207 continue;
1208 branch = i;
1209 }
1210 }
1211 return branch;
1212}
1213
1214
1215
1216
1217
1218
1219
/*
 * Queue an URB for transfer.
 *
 * Rejects isochronous URBs outright (this driver does not support them).
 * On first use of an endpoint a struct isp1362_ep is allocated (outside the
 * spinlock, since mem_flags may allow sleeping) and attached to
 * hep->hcpriv; control/bulk endpoints go on the async schedule, interrupt
 * endpoints are placed on a periodic-schedule branch chosen by balance().
 * Finally the matching transfer engine is kicked.
 *
 * Returns 0 on success, -ENOSPC for iso URBs or when balance() finds no
 * bandwidth, -ENOMEM on allocation failure, -ENODEV when no root-hub port
 * is enabled or the HC is not running, or the error from
 * usb_hcd_link_urb_to_ep().
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
			usb_pipeint(pipe) ? "int" :
			usb_pipebulk(pipe) ? "bulk" :
			"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);	/* kfree(NULL) is a no-op if hcpriv existed */
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		/* endpoint already initialized by an earlier submission */
		ep = hep->hcpriv;
	} else {
		/* first URB for this endpoint: initialize bookkeeping */
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		/* PIPE_ISOCHRONOUS is unreachable here (rejected above),
		 * but kept for symmetry with the scheduling code below */
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			ep->branch = PERIODIC_SIZE;	/* means: not yet placed */
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP; branch already chosen */
		if (ep->branch < PERIODIC_SIZE)
			break;

		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* align start frame to interval and branch */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the transfer engine matching this pipe type */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1400
1401static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1402{
1403 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1404 struct usb_host_endpoint *hep;
1405 unsigned long flags;
1406 struct isp1362_ep *ep;
1407 int retval = 0;
1408
1409 DBG(3, "%s: urb %p\n", __func__, urb);
1410
1411 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1412 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1413 if (retval)
1414 goto done;
1415
1416 hep = urb->hcpriv;
1417
1418 if (!hep) {
1419 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1420 return -EIDRM;
1421 }
1422
1423 ep = hep->hcpriv;
1424 if (ep) {
1425
1426 if (ep->hep->urb_list.next == &urb->urb_list) {
1427 if (!list_empty(&ep->active)) {
1428 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1429 urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1430
1431 remove_ptd(isp1362_hcd, ep);
1432 urb = NULL;
1433 }
1434 }
1435 if (urb) {
1436 DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1437 ep->num_req);
1438 finish_request(isp1362_hcd, ep, urb, status);
1439 } else
1440 DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1441 } else {
1442 pr_warning("%s: No EP in URB %p\n", __func__, urb);
1443 retval = -EINVAL;
1444 }
1445done:
1446 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1447
1448 DBG(3, "%s: exit\n", __func__);
1449
1450 return retval;
1451}
1452
/*
 * Release all driver state for an endpoint that usbcore is disabling.
 *
 * If a PTD for this endpoint is still active in the chip, schedule it for
 * removal and then poll (msleep-wait) until the interrupt handler has taken
 * it off ep->active before freeing the endpoint.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* only request removal once (remove_list non-empty means
		 * a removal is already pending) */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list.
	 * NOTE(review): unbounded wait — relies on the IRQ handler firing. */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1482
1483static int isp1362_get_frame(struct usb_hcd *hcd)
1484{
1485 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1486 u32 fmnum;
1487 unsigned long flags;
1488
1489 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1490 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1491 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1492
1493 return (int)fmnum;
1494}
1495
1496
1497
1498
/*
 * Build the root-hub status-change bitmap for usbcore.
 *
 * buf[0] bit 0 reflects hub-level changes (local power / overcurrent),
 * bits 1..N reflect per-port change bits.  Status is taken from the
 * cached rhstatus/rhport values maintained by the interrupt handler.
 * Returns 1 if anything changed, 0 otherwise, -ESHUTDOWN if the HC is
 * not running.
 */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -ESHUTDOWN;

	/* Report no status change now, if we are scheduled to be
	   called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362_hcd->rhdesca & RH_A_NDP;
	BUG_ON(ports > 2);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* init status */
	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362_hcd->rhport[i];

		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
			continue;
		}

		/* NOTE(review): this check is a no-op at the end of the
		 * loop body; presumably a remnant of removed code. */
		if (!(status & RH_PS_CCS))
			continue;
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return changed;
}
1539
/*
 * Fill in the USB hub descriptor for the root hub, derived from the
 * cached HCRHDESCA register value.
 */
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 reg = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = 0x29;	/* USB hub descriptor type */
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	/* port count from HCRHDESCA NDP field (chip has at most 2 ports) */
	desc->bNbrPorts = reg & 0x3;
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
	/* ports removable, and legacy PortPwrCtrlMask */
	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
	desc->u.hs.DeviceRemovable[1] = ~0;

	DBG(3, "%s: exit\n", __func__);
}
1561
1562
/*
 * Handle root-hub control requests from usbcore (hub class requests are
 * emulated by reading/writing the chip's OHCI-style root-hub registers).
 * Returns 0 on success or -EPIPE to signal a protocol stall for
 * unsupported requests.
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			_DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			/* write-to-clear the overcurrent-change bit */
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through */
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* root hub never reports hub-level status changes here */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* return the cached port status (updated by the IRQ handler) */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature to the OHCI write-to-clear port bit */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;	/* ClearPortEnable */
			break;
		case USB_PORT_FEAT_C_ENABLE:
			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;	/* ClearSuspendStatus */
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;	/* ClearPortPower */

			break;
		case USB_PORT_FEAT_C_CONNECTION:
			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		/* refresh the cached port status after the write */
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			_DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			/* keep asserting reset until the reset-width time
			 * has elapsed or the device disconnects */
			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any reset in progress completes */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					udelay(500);
				}
				/* port no longer connected: stop resetting */
				if (!(tmp & RH_PS_CCS))
					break;
				/* Reset lasts 10ms (claims datasheet) */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));

				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		_DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}

	return retval;
}
1730
1731#ifdef CONFIG_PM
1732static int isp1362_bus_suspend(struct usb_hcd *hcd)
1733{
1734 int status = 0;
1735 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1736 unsigned long flags;
1737
1738 if (time_before(jiffies, isp1362_hcd->next_statechange))
1739 msleep(5);
1740
1741 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1742
1743 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1744 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1745 case OHCI_USB_RESUME:
1746 DBG(0, "%s: resume/suspend?\n", __func__);
1747 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1748 isp1362_hcd->hc_control |= OHCI_USB_RESET;
1749 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1750
1751 case OHCI_USB_RESET:
1752 status = -EBUSY;
1753 pr_warning("%s: needs reinit!\n", __func__);
1754 goto done;
1755 case OHCI_USB_SUSPEND:
1756 pr_warning("%s: already suspended?\n", __func__);
1757 goto done;
1758 }
1759 DBG(0, "%s: suspend root hub\n", __func__);
1760
1761
1762 hcd->state = HC_STATE_QUIESCING;
1763 if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1764 !list_empty(&isp1362_hcd->intl_queue.active) ||
1765 !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
1766 !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
1767 int limit;
1768
1769 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1770 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1771 isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1772 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1773 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1774
1775 DBG(0, "%s: stopping schedules ...\n", __func__);
1776 limit = 2000;
1777 while (limit > 0) {
1778 udelay(250);
1779 limit -= 250;
1780 if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1781 break;
1782 }
1783 mdelay(7);
1784 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1785 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1786 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1787 }
1788 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1789 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1790 finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1791 }
1792 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1793 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1794 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1795 finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1796 }
1797 DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1798 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1799 isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1800 isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1801
1802
1803 isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1804 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1805 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1806 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1807
1808#if 1
1809 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1810 if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1811 pr_err("%s: controller won't suspend %08x\n", __func__,
1812 isp1362_hcd->hc_control);
1813 status = -EBUSY;
1814 } else
1815#endif
1816 {
1817
1818 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1819 }
1820done:
1821 if (status == 0) {
1822 hcd->state = HC_STATE_SUSPENDED;
1823 DBG(0, "%s: HCD suspended: %08x\n", __func__,
1824 isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1825 }
1826 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1827 return status;
1828}
1829
1830static int isp1362_bus_resume(struct usb_hcd *hcd)
1831{
1832 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1833 u32 port;
1834 unsigned long flags;
1835 int status = -EINPROGRESS;
1836
1837 if (time_before(jiffies, isp1362_hcd->next_statechange))
1838 msleep(5);
1839
1840 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1841 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1842 pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1843 if (hcd->state == HC_STATE_RESUMING) {
1844 pr_warning("%s: duplicate resume\n", __func__);
1845 status = 0;
1846 } else
1847 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1848 case OHCI_USB_SUSPEND:
1849 DBG(0, "%s: resume root hub\n", __func__);
1850 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1851 isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1852 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1853 break;
1854 case OHCI_USB_RESUME:
1855
1856 DBG(0, "%s: remote wakeup\n", __func__);
1857 break;
1858 case OHCI_USB_OPER:
1859 DBG(0, "%s: odd resume\n", __func__);
1860 status = 0;
1861 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1862 break;
1863 default:
1864 DBG(0, "%s: root hub hardware reset\n", __func__);
1865 status = -EBUSY;
1866 }
1867 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1868 if (status == -EBUSY) {
1869 DBG(0, "%s: Restarting HC\n", __func__);
1870 isp1362_hc_stop(hcd);
1871 return isp1362_hc_start(hcd);
1872 }
1873 if (status != -EINPROGRESS)
1874 return status;
1875 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1876 port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1877 while (port--) {
1878 u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1879
1880
1881 if (!(stat & RH_PS_PSS)) {
1882 DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1883 continue;
1884 }
1885 DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1886 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1887 }
1888 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1889
1890
1891 hcd->state = HC_STATE_RESUMING;
1892 mdelay(20 + 15);
1893
1894 isp1362_hcd->hc_control = OHCI_USB_OPER;
1895 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1896 isp1362_show_reg(isp1362_hcd, HCCONTROL);
1897 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1898 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1899
1900 msleep(10);
1901
1902
1903 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1904
1905 hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1906 hcd->state = HC_STATE_RUNNING;
1907 return 0;
1908}
1909#else
1910#define isp1362_bus_suspend NULL
1911#define isp1362_bus_resume NULL
1912#endif
1913
1914
1915
1916#ifdef STUB_DEBUG_FILE
1917
/* No-op stub: debug proc file support is compiled out (STUB_DEBUG_FILE). */
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
/* No-op stub: debug proc file support is compiled out (STUB_DEBUG_FILE). */
static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
1924
1925#else
1926
1927#include <linux/proc_fs.h>
1928#include <linux/seq_file.h>
1929
/* Print a decoded HCuPINT/HCuPINTENB (microprocessor interrupt) mask. */
static void dump_irq(struct seq_file *s, char *label, u16 mask)
{
	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
		   mask & HCuPINT_SUSP ? " susp" : "",
		   mask & HCuPINT_OPR ? " opr" : "",
		   mask & HCuPINT_EOT ? " eot" : "",
		   mask & HCuPINT_ATL ? " atl" : "",
		   mask & HCuPINT_SOF ? " sof" : "");
}
1940
/* Print a decoded OHCI HCINTSTAT/HCINTENB interrupt mask. */
static void dump_int(struct seq_file *s, char *label, u32 mask)
{
	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
		   mask & OHCI_INTR_MIE ? " MIE" : "",
		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
		   mask & OHCI_INTR_FNO ? " fno" : "",
		   mask & OHCI_INTR_UE ? " ue" : "",
		   mask & OHCI_INTR_RD ? " rd" : "",
		   mask & OHCI_INTR_SF ? " sof" : "",
		   mask & OHCI_INTR_SO ? " so" : "");
}
1952
1953static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1954{
1955 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1956 mask & OHCI_CTRL_RWC ? " rwc" : "",
1957 mask & OHCI_CTRL_RWE ? " rwe" : "",
1958 ({
1959 char *hcfs;
1960 switch (mask & OHCI_CTRL_HCFS) {
1961 case OHCI_USB_OPER:
1962 hcfs = " oper";
1963 break;
1964 case OHCI_USB_RESET:
1965 hcfs = " reset";
1966 break;
1967 case OHCI_USB_RESUME:
1968 hcfs = " resume";
1969 break;
1970 case OHCI_USB_SUSPEND:
1971 hcfs = " suspend";
1972 break;
1973 default:
1974 hcfs = " ?";
1975 }
1976 hcfs;
1977 }));
1978}
1979
/*
 * Dump all readable ISP1362 registers to a seq_file for the debug proc
 * entry: OHCI-compatible 32-bit registers first, then the chip-specific
 * 16-bit configuration, ISTL, INTL and ATL buffer registers.
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* OHCI-style operational registers */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* chip-specific 16-bit configuration/status registers */
	seq_printf(s, "HCHWCFG    [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT    [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* reading HCDIRDATA has side effects; left disabled */
	seq_printf(s, "HCDIRDATA  [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* interrupt-transfer (INTL) buffer registers */
	seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* acknowledged-transfer (ATL) buffer registers */
	seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	/* reading HCATLDONE has side effects; left disabled */
	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR  [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC   [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2071
2072static int proc_isp1362_show(struct seq_file *s, void *unused)
2073{
2074 struct isp1362_hcd *isp1362_hcd = s->private;
2075 struct isp1362_ep *ep;
2076 int i;
2077
2078 seq_printf(s, "%s\n%s version %s\n",
2079 isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2080
2081
2082
2083
2084 seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2085 isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2086 isp1362_hcd->stat2, isp1362_hcd->stat1);
2087 seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2088 seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2089 seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2090 max(isp1362_hcd->istl_queue[0] .stat_maxptds,
2091 isp1362_hcd->istl_queue[1] .stat_maxptds));
2092
2093
2094 spin_lock_irq(&isp1362_hcd->lock);
2095
2096 dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2097 dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2098 dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2099 dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2100 dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2101
2102 for (i = 0; i < NUM_ISP1362_IRQS; i++)
2103 if (isp1362_hcd->irq_stat[i])
2104 seq_printf(s, "%-15s: %d\n",
2105 ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2106
2107 dump_regs(s, isp1362_hcd);
2108 list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2109 struct urb *urb;
2110
2111 seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2112 ({
2113 char *s;
2114 switch (ep->nextpid) {
2115 case USB_PID_IN:
2116 s = "in";
2117 break;
2118 case USB_PID_OUT:
2119 s = "out";
2120 break;
2121 case USB_PID_SETUP:
2122 s = "setup";
2123 break;
2124 case USB_PID_ACK:
2125 s = "status";
2126 break;
2127 default:
2128 s = "?";
2129 break;
2130 };
2131 s;}), ep->maxpacket) ;
2132 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2133 seq_printf(s, " urb%p, %d/%d\n", urb,
2134 urb->actual_length,
2135 urb->transfer_buffer_length);
2136 }
2137 }
2138 if (!list_empty(&isp1362_hcd->async))
2139 seq_printf(s, "\n");
2140 dump_ptd_queue(&isp1362_hcd->atl_queue);
2141
2142 seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2143
2144 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2145 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2146 isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2147
2148 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2149 ep->interval, ep,
2150 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2151 ep->udev->devnum, ep->epnum,
2152 (ep->epnum == 0) ? "" :
2153 ((ep->nextpid == USB_PID_IN) ?
2154 "in" : "out"), ep->maxpacket);
2155 }
2156 dump_ptd_queue(&isp1362_hcd->intl_queue);
2157
2158 seq_printf(s, "ISO:\n");
2159
2160 list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2161 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2162 ep->interval, ep,
2163 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2164 ep->udev->devnum, ep->epnum,
2165 (ep->epnum == 0) ? "" :
2166 ((ep->nextpid == USB_PID_IN) ?
2167 "in" : "out"), ep->maxpacket);
2168 }
2169
2170 spin_unlock_irq(&isp1362_hcd->lock);
2171 seq_printf(s, "\n");
2172
2173 return 0;
2174}
2175
2176static int proc_isp1362_open(struct inode *inode, struct file *file)
2177{
2178 return single_open(file, proc_isp1362_show, PDE_DATA(inode));
2179}
2180
/* File operations for the driver's /proc debug entry (seq_file based). */
static const struct file_operations proc_ops = {
	.open = proc_isp1362_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2187
2188
/* Path of the debug entry below /proc. */
static const char proc_filename[] = "driver/isp1362";
2190
2191static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2192{
2193 struct proc_dir_entry *pde;
2194
2195 pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, isp1362_hcd);
2196 if (pde == NULL) {
2197 pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2198 return;
2199 }
2200 isp1362_hcd->pde = pde;
2201}
2202
2203static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2204{
2205 if (isp1362_hcd->pde)
2206 remove_proc_entry(proc_filename, NULL);
2207}
2208
2209#endif
2210
2211
2212
2213static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2214{
2215 int tmp = 20;
2216
2217 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2218 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2219 while (--tmp) {
2220 mdelay(1);
2221 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2222 break;
2223 }
2224 if (!tmp)
2225 pr_err("Software reset timeout\n");
2226}
2227
/* Locked wrapper around __isp1362_sw_reset() for callers that do not
 * already hold the HCD spinlock.
 */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	__isp1362_sw_reset(isp1362_hcd);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2236
2237static int isp1362_mem_config(struct usb_hcd *hcd)
2238{
2239 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2240 unsigned long flags;
2241 u32 total;
2242 u16 istl_size = ISP1362_ISTL_BUFSIZE;
2243 u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2244 u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2245 u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2246 u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2247 u16 atl_size;
2248 int i;
2249
2250 WARN_ON(istl_size & 3);
2251 WARN_ON(atl_blksize & 3);
2252 WARN_ON(intl_blksize & 3);
2253 WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2254 WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2255
2256 BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2257 if (atl_buffers > 32)
2258 atl_buffers = 32;
2259 atl_size = atl_buffers * atl_blksize;
2260 total = atl_size + intl_size + istl_size;
2261 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2262 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2263 istl_size / 2, istl_size, 0, istl_size / 2);
2264 dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2265 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2266 intl_size, istl_size);
2267 dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2268 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2269 atl_size, istl_size + intl_size);
2270 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
2271 ISP1362_BUF_SIZE - total);
2272
2273 if (total > ISP1362_BUF_SIZE) {
2274 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2275 __func__, total, ISP1362_BUF_SIZE);
2276 return -ENOMEM;
2277 }
2278
2279 total = istl_size + intl_size + atl_size;
2280 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2281
2282 for (i = 0; i < 2; i++) {
2283 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
2284 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2285 isp1362_hcd->istl_queue[i].blk_size = 4;
2286 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2287 snprintf(isp1362_hcd->istl_queue[i].name,
2288 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2289 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2290 isp1362_hcd->istl_queue[i].name,
2291 isp1362_hcd->istl_queue[i].buf_start,
2292 isp1362_hcd->istl_queue[i].buf_size);
2293 }
2294 isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2295
2296 isp1362_hcd->intl_queue.buf_start = istl_size;
2297 isp1362_hcd->intl_queue.buf_size = intl_size;
2298 isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2299 isp1362_hcd->intl_queue.blk_size = intl_blksize;
2300 isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2301 isp1362_hcd->intl_queue.skip_map = ~0;
2302 INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2303
2304 isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2305 isp1362_hcd->intl_queue.buf_size);
2306 isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2307 isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2308 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2309 isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2310 1 << (ISP1362_INTL_BUFFERS - 1));
2311
2312 isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2313 isp1362_hcd->atl_queue.buf_size = atl_size;
2314 isp1362_hcd->atl_queue.buf_count = atl_buffers;
2315 isp1362_hcd->atl_queue.blk_size = atl_blksize;
2316 isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2317 isp1362_hcd->atl_queue.skip_map = ~0;
2318 INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2319
2320 isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2321 isp1362_hcd->atl_queue.buf_size);
2322 isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2323 isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2324 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2325 isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2326 1 << (atl_buffers - 1));
2327
2328 snprintf(isp1362_hcd->atl_queue.name,
2329 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2330 snprintf(isp1362_hcd->intl_queue.name,
2331 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2332 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2333 isp1362_hcd->intl_queue.name,
2334 isp1362_hcd->intl_queue.buf_start,
2335 ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2336 isp1362_hcd->intl_queue.buf_size);
2337 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2338 isp1362_hcd->atl_queue.name,
2339 isp1362_hcd->atl_queue.buf_start,
2340 atl_buffers, isp1362_hcd->atl_queue.blk_size,
2341 isp1362_hcd->atl_queue.buf_size);
2342
2343 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2344
2345 return 0;
2346}
2347
2348static int isp1362_hc_reset(struct usb_hcd *hcd)
2349{
2350 int ret = 0;
2351 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2352 unsigned long t;
2353 unsigned long timeout = 100;
2354 unsigned long flags;
2355 int clkrdy = 0;
2356
2357 pr_debug("%s:\n", __func__);
2358
2359 if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2360 isp1362_hcd->board->reset(hcd->self.controller, 1);
2361 msleep(20);
2362 if (isp1362_hcd->board->clock)
2363 isp1362_hcd->board->clock(hcd->self.controller, 1);
2364 isp1362_hcd->board->reset(hcd->self.controller, 0);
2365 } else
2366 isp1362_sw_reset(isp1362_hcd);
2367
2368
2369 t = jiffies + msecs_to_jiffies(timeout);
2370 while (!clkrdy && time_before_eq(jiffies, t)) {
2371 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2372 clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2373 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2374 if (!clkrdy)
2375 msleep(4);
2376 }
2377
2378 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2379 isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2380 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2381 if (!clkrdy) {
2382 pr_err("Clock not ready after %lums\n", timeout);
2383 ret = -ENODEV;
2384 }
2385 return ret;
2386}
2387
2388static void isp1362_hc_stop(struct usb_hcd *hcd)
2389{
2390 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2391 unsigned long flags;
2392 u32 tmp;
2393
2394 pr_debug("%s:\n", __func__);
2395
2396 del_timer_sync(&hcd->rh_timer);
2397
2398 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2399
2400 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2401
2402
2403 tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2404 tmp &= ~(RH_A_NPS | RH_A_PSM);
2405 isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2406 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2407
2408
2409 if (isp1362_hcd->board && isp1362_hcd->board->reset)
2410 isp1362_hcd->board->reset(hcd->self.controller, 1);
2411 else
2412 __isp1362_sw_reset(isp1362_hcd);
2413
2414 if (isp1362_hcd->board && isp1362_hcd->board->clock)
2415 isp1362_hcd->board->clock(hcd->self.controller, 0);
2416
2417 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2418}
2419
2420#ifdef CHIP_BUFFER_TEST
2421static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2422{
2423 int ret = 0;
2424 u16 *ref;
2425 unsigned long flags;
2426
2427 ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2428 if (ref) {
2429 int offset;
2430 u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2431
2432 for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2433 ref[offset] = ~offset;
2434 tst[offset] = offset;
2435 }
2436
2437 for (offset = 0; offset < 4; offset++) {
2438 int j;
2439
2440 for (j = 0; j < 8; j++) {
2441 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2442 isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2443 isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2444 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2445
2446 if (memcmp(ref, tst, j)) {
2447 ret = -ENODEV;
2448 pr_err("%s: memory check with %d byte offset %d failed\n",
2449 __func__, j, offset);
2450 dump_data((u8 *)ref + offset, j);
2451 dump_data((u8 *)tst + offset, j);
2452 }
2453 }
2454 }
2455
2456 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2457 isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2458 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2459 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2460
2461 if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2462 ret = -ENODEV;
2463 pr_err("%s: memory check failed\n", __func__);
2464 dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2465 }
2466
2467 for (offset = 0; offset < 256; offset++) {
2468 int test_size = 0;
2469
2470 yield();
2471
2472 memset(tst, 0, ISP1362_BUF_SIZE);
2473 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2474 isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2475 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2476 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2477 if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2478 ISP1362_BUF_SIZE / 2)) {
2479 pr_err("%s: Failed to clear buffer\n", __func__);
2480 dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2481 break;
2482 }
2483 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2484 isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2485 isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2486 offset * 2 + PTD_HEADER_SIZE, test_size);
2487 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2488 PTD_HEADER_SIZE + test_size);
2489 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2490 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2491 dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2492 dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2493 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2494 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2495 PTD_HEADER_SIZE + test_size);
2496 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2497 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2498 ret = -ENODEV;
2499 pr_err("%s: memory check with offset %02x failed\n",
2500 __func__, offset);
2501 break;
2502 }
2503 pr_warning("%s: memory check with offset %02x ok after second read\n",
2504 __func__, offset);
2505 }
2506 }
2507 kfree(ref);
2508 }
2509 return ret;
2510}
2511#endif
2512
/*
 * Bring the controller into operational state: verify the chip ID,
 * program the hardware configuration from the board platform data,
 * partition the buffer memory, set up the root hub descriptors and
 * finally enable interrupts and port power.
 *
 * Returns 0 on success, -ENODEV on a bad chip ID or failed self test,
 * or the error from isp1362_mem_config().
 *
 * NOTE(review): register writes below follow a deliberate order
 * (config before mem setup before interrupt enable); do not reorder.
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* refuse to drive anything that doesn't identify as an ISP1362 */
	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear pending chip-level interrupts and mask all sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* build HCHWCFG from board platform data */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	/* this driver does not use DMA */
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* root hub descriptor A: power switching and power-on-to-good time */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		/* default POTPGT: 25 * 2 ms = 50 ms */
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* write twice (OCPM cleared then set), then read back what stuck */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* program the frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* enable OHCI-level and chip-level interrupt sources */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);

	/* enable global port power, remote wakeup writes */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2616
2617
2618
/* HCD operations table handed to usb_create_hcd(). */
static struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	.irq = isp1362_irq,
	/* full/low speed controller with memory-mapped (PIO) access */
	.flags = HCD_USB11 | HCD_MEMORY,

	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};
2642
2643
2644
2645static int isp1362_remove(struct platform_device *pdev)
2646{
2647 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2648 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2649 struct resource *res;
2650
2651 remove_debug_file(isp1362_hcd);
2652 DBG(0, "%s: Removing HCD\n", __func__);
2653 usb_remove_hcd(hcd);
2654
2655 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2656 isp1362_hcd->data_reg);
2657 iounmap(isp1362_hcd->data_reg);
2658
2659 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2660 isp1362_hcd->addr_reg);
2661 iounmap(isp1362_hcd->addr_reg);
2662
2663 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2664 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2665 if (res)
2666 release_mem_region(res->start, resource_size(res));
2667
2668 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2669 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2670 if (res)
2671 release_mem_region(res->start, resource_size(res));
2672
2673 DBG(0, "%s: put_hcd\n", __func__);
2674 usb_put_hcd(hcd);
2675 DBG(0, "%s: Done\n", __func__);
2676
2677 return 0;
2678}
2679
/*
 * Platform driver probe: claim and map the two register windows
 * (mem resource 0 = data port, 1 = address port), create the HCD,
 * translate the IRQ resource flags into request_irq() trigger flags,
 * register the HCD and create the /proc debug file.
 *
 * Error paths unwind in strict reverse order of acquisition via the
 * err1..err6 labels; keep label order in sync with the setup sequence.
 */
static int isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *addr, *data;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	struct resource *irq_res;
	unsigned int irq_flags = 0;

	if (usb_disabled())
		return -ENODEV;

	/* expect two memory resources plus one IRQ resource */
	if (pdev->num_resources < 3) {
		retval = -ENODEV;
		goto err1;
	}

	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!addr || !data || !irq_res) {
		retval = -ENODEV;
		goto err1;
	}
	irq = irq_res->start;

	/* this driver is PIO only; refuse DMA-capable setups */
	if (pdev->dev.dma_mask) {
		DBG(1, "won't do DMA");
		retval = -ENODEV;
		goto err1;
	}

	if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
		retval = -EBUSY;
		goto err1;
	}
	addr_reg = ioremap(addr->start, resource_size(addr));
	if (addr_reg == NULL) {
		retval = -ENOMEM;
		goto err2;
	}

	if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
		retval = -EBUSY;
		goto err3;
	}
	data_reg = ioremap(data->start, resource_size(data));
	if (data_reg == NULL) {
		retval = -ENOMEM;
		goto err4;
	}

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err5;
	}
	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = pdev->dev.platform_data;
#if USE_PLATFORM_DELAY
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err6;
	}
#endif

	/* map IORESOURCE_IRQ trigger flags onto request_irq() flags */
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
	if (retval != 0)
		goto err6;
	pr_info("%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

 err6:
	DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
	usb_put_hcd(hcd);
 err5:
	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
	iounmap(data_reg);
 err4:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
	release_mem_region(data->start, resource_size(data));
 err3:
	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
	iounmap(addr_reg);
 err2:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
	release_mem_region(addr->start, resource_size(addr));
 err1:
	pr_err("%s: init error, %d\n", __func__, retval);

	return retval;
}
2804
2805#ifdef CONFIG_PM
2806static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2807{
2808 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2809 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2810 unsigned long flags;
2811 int retval = 0;
2812
2813 DBG(0, "%s: Suspending device\n", __func__);
2814
2815 if (state.event == PM_EVENT_FREEZE) {
2816 DBG(0, "%s: Suspending root hub\n", __func__);
2817 retval = isp1362_bus_suspend(hcd);
2818 } else {
2819 DBG(0, "%s: Suspending RH ports\n", __func__);
2820 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2821 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2822 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2823 }
2824 if (retval == 0)
2825 pdev->dev.power.power_state = state;
2826 return retval;
2827}
2828
2829static int isp1362_resume(struct platform_device *pdev)
2830{
2831 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2832 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2833 unsigned long flags;
2834
2835 DBG(0, "%s: Resuming\n", __func__);
2836
2837 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2838 DBG(0, "%s: Resume RH ports\n", __func__);
2839 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2840 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2841 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2842 return 0;
2843 }
2844
2845 pdev->dev.power.power_state = PMSG_ON;
2846
2847 return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2848}
2849#else
2850#define isp1362_suspend NULL
2851#define isp1362_resume NULL
2852#endif
2853
/* Platform driver glue; module init/exit generated by the macro below. */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = isp1362_remove,

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};

module_platform_driver(isp1362_driver);
2867