1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/usb.h>
26#include <linux/usb_usual.h>
27#include <linux/blkdev.h>
28#include <linux/timer.h>
29#include <linux/scatterlist.h>
30#include <scsi/scsi.h>
31
/* Name used in log messages, and the fixed block major of this driver. */
#define DRV_NAME "ub"

#define UB_MAJOR 180
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
/* Highest number of LUNs we attempt to probe per device.
 * NOTE(review): exact rationale for 9 not visible in this chunk — confirm. */
#define UB_MAX_LUNS 9

/* Minor numbers reserved per LUN for partitions. */
#define UB_PARTS_PER_LUN 8

/* Maximum SCSI command block carried inside a Bulk-Only CBW. */
#define UB_MAX_CDB_SIZE 16

/* Bytes of REQUEST SENSE data fetched (fixed-format sense header). */
#define UB_SENSE_SIZE 18

121
/*
 * USB Bulk-Only Transport command block wrapper (CBW): sent on the
 * bulk-out pipe ahead of every command (see ub_scsi_cmd_start()).
 */
struct bulk_cb_wrap {
	__le32	Signature;		/* 'USBC' */
	u32	Tag;			/* matched verbatim against the CSW Tag */
	__le32	DataTransferLength;	/* expected bytes in the data stage */
	u8	Flags;			/* bit 7 set = device-to-host (read) */
	u8	Lun;			/* target LUN */
	u8	Length;			/* valid bytes in CDB[] */
	u8	CDB[UB_MAX_CDB_SIZE];
};

#define US_BULK_CB_WRAP_LEN	31
#define US_BULK_CB_SIGN		0x43425355	/* little-endian 'USBC' */
#define US_BULK_FLAG_IN		1
#define US_BULK_FLAG_OUT	0

/* Command status wrapper (CSW): read back on the bulk-in pipe. */
struct bulk_cs_wrap {
	__le32	Signature;	/* should be 'USBS'; validated lazily, see
				   ub_scsi_urb_compl() */
	u32	Tag;		/* same as original command's tag */
	__le32	Residue;	/* amount not transferred */
	u8	Status;		/* US_BULK_STAT_* */
};

#define US_BULK_CS_WRAP_LEN	13
#define US_BULK_CS_SIGN		0x53425355	/* little-endian 'USBS' */
#define US_BULK_STAT_OK		0
#define US_BULK_STAT_FAIL	1
#define US_BULK_STAT_PHASE	2

/* Class-specific control requests of the Bulk-Only transport. */
#define US_BULK_RESET_REQUEST	0xff
#define US_BULK_GET_MAX_LUN	0xfe
154
155
156
struct ub_dev;	/* full definition below */

/* Per-request limits. */
#define UB_MAX_REQ_SG	9	/* scatter-gather segments per command */
#define UB_MAX_SECTORS	64

/*
 * Per-URB timeouts in jiffies: short for wrapper/control transfers,
 * longer for data and status stages.  A block-layer supplied timeout
 * (cmd->timeo) overrides the data/status defaults.
 */
#define UB_URB_TIMEOUT	(HZ*2)
#define UB_DATA_TIMEOUT	(HZ*5)
#define UB_STAT_TIMEOUT	(HZ*5)
#define UB_CTRL_TIMEOUT	(HZ/2)

/*
 * Data direction of a command, from the host's point of view.
 */
#define UB_DIR_NONE	0
#define UB_DIR_READ	1
#define UB_DIR_ILLEGAL2	2	/* never assigned in this file's visible code */
#define UB_DIR_WRITE	3

/* Single-letter direction tag for debug printks: 'w', 'r' or 'n'. */
#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
			 (((c)==UB_DIR_READ)? 'r': 'n'))
181
/*
 * Lifecycle of a command, driven by ub_scsi_urb_compl().
 */
enum ub_scsi_cmd_state {
	UB_CMDST_INIT,		/* built but not yet started */
	UB_CMDST_CMD,		/* CBW in flight on the bulk-out pipe */
	UB_CMDST_DATA,		/* a data-segment URB is in flight */
	UB_CMDST_CLR2STS,	/* clearing a data-stage stall, then read CSW */
	UB_CMDST_STAT,		/* CSW read in flight */
	UB_CMDST_CLEAR,		/* clearing a stall after the CBW, then sense */
	UB_CMDST_CLRRS,		/* clearing a stall after CSW, then re-read CSW */
	UB_CMDST_SENSE,		/* auto REQUEST SENSE running on our behalf */
	UB_CMDST_DONE		/* terminal; ->done callback is invoked */
};
193
/*
 * An internal SCSI command.  One lives in each LUN (cmdv[0], for block
 * requests) and one in the device (top_rqs_cmd, for auto-sense).
 * Commands are linked into sc->cmd_queue via @next.
 */
struct ub_scsi_cmd {
	unsigned char cdb[UB_MAX_CDB_SIZE];	/* command block */
	unsigned char cdb_len;			/* valid bytes in cdb[] */

	unsigned char dir;			/* UB_DIR_* */
	enum ub_scsi_cmd_state state;
	unsigned int tag;			/* matched against CSW Tag */
	struct ub_scsi_cmd *next;		/* cmd_queue linkage */

	int error;			/* 0 or -errno from the transport */
	unsigned int act_len;		/* bytes actually transferred so far */
	unsigned char key, asc, ascq;	/* sense data (ub_top_sense_done) */

	int stat_count;			/* CSW read retries (max 4) */
	unsigned int timeo;		/* jiffies; 0 = use UB_*_TIMEOUT */

	unsigned int len;		/* expected total data length */
	unsigned int current_sg;	/* index of segment in flight */
	unsigned int nsg;		/* valid entries in sgv[] */
	struct scatterlist sgv[UB_MAX_REQ_SG];

	struct ub_lun *lun;
	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
	void *back;		/* opaque; struct ub_request * for rw cmds,
				   the originating cmd for auto-sense */
};
219
/* The single in-flight block-layer request of a LUN, with its mapped
 * scatterlist and retry counter (see ub_rw_cmd_retry()). */
struct ub_request {
	struct request *rq;
	unsigned int current_try;	/* retries so far, capped at 3 */
	unsigned int nsg;		/* valid entries in sgv[] */
	struct scatterlist sgv[UB_MAX_REQ_SG];
};
226
227
228
/* Device geometry, filled by ub_sync_read_cap() during revalidation. */
struct ub_capacity {
	unsigned long nsec;	/* number of sectors */
	unsigned int bsize;	/* device block size in bytes (512 default) */
	unsigned int bshift;	/* shift applied to 512-byte sector counts to
				   obtain device blocks (ub_cmd_build_block) */
};
234
235
236
237
238
239
240
241
242
243
/*
 * A roll-your-own completion with a counter instead of a wait queue:
 * the tasklet polls it with ub_is_completed() rather than sleeping.
 */
struct ub_completion {
	unsigned int done;	/* ub_complete() events since (re)init */
	spinlock_t lock;	/* protects @done */
};

/* One-time initialization: zero the counter and set up the lock. */
static inline void ub_init_completion(struct ub_completion *x)
{
	x->done = 0;
	spin_lock_init(&x->lock);
}

/* Re-arm an already initialized completion (the lock is kept). */
#define UB_INIT_COMPLETION(x)	((x).done = 0)
256
/* Record one completion event on @x (counter increment under the lock). */
static void ub_complete(struct ub_completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->lock, flags);
}
265
266static int ub_is_completed(struct ub_completion *x)
267{
268 unsigned long flags;
269 int ret;
270
271 spin_lock_irqsave(&x->lock, flags);
272 ret = x->done;
273 spin_unlock_irqrestore(&x->lock, flags);
274 return ret;
275}
276
277
278
/* Singly linked FIFO of commands; qmax records the high-water mark. */
struct ub_scsi_cmd_queue {
	int qlen, qmax;
	struct ub_scsi_cmd *head, *tail;
};
283
284
285
286
/*
 * One logical unit: a gendisk plus the single command slot that
 * serializes all of its block requests.
 */
struct ub_lun {
	struct ub_dev *udev;		/* owning device */
	struct list_head link;		/* on udev->luns */
	struct gendisk *disk;
	int id;				/* host number from ub_id_get() */
	int num;			/* LUN number placed into the CBW */
	char name[16];

	int changed;			/* media was changed */
	int removable;
	int readonly;

	struct ub_request urq;		/* the single in-flight request */

	/*
	 * A trivial one-entry command "pool": cmda[0] marks cmdv[0]
	 * busy (see ub_get_cmd()/ub_put_cmd()).
	 */
	int cmda[1];
	struct ub_scsi_cmd cmdv[1];

	struct ub_capacity capacity;
};
312
313
314
315
/*
 * The per-interface softc.  One work URB/timer pair exists per device,
 * so at most one USB transfer is in flight at any time; commands from
 * all LUNs funnel through cmd_queue and the tasklet.
 */
struct ub_dev {
	spinlock_t *lock;	/* from the shared pool, ub_next_lock() */
	atomic_t poison;	/* nonzero once the device is disconnected */
	int openc;		/* protected by ub_lock; when poisoned and
				   zero, the softc is freed (ub_put()) */

	int reset;		/* nonzero while a reset is pending; value is
				   try+1 from ub_reset_enter() */
	int bad_resid;		/* set once CSW residues are proven bogus */
	unsigned int tagcnt;	/* source of CBW tags */
	char name[12];
	struct usb_device *dev;
	struct usb_interface *intf;

	struct list_head luns;	/* of struct ub_lun */

	/* Cached pipe handles for the four transfer types we use. */
	unsigned int send_bulk_pipe;
	unsigned int recv_bulk_pipe;
	unsigned int send_ctrl_pipe;
	unsigned int recv_ctrl_pipe;

	struct tasklet_struct tasklet;	/* runs ub_scsi_action() */

	struct ub_scsi_cmd_queue cmd_queue;
	struct ub_scsi_cmd top_rqs_cmd;	/* dedicated auto REQUEST SENSE cmd */
	unsigned char top_sense[UB_SENSE_SIZE];	/* auto-sense landing buffer */

	struct ub_completion work_done;	/* signals work_urb completion */
	struct urb work_urb;		/* the single in-flight URB */
	struct timer_list work_timer;	/* guards work_urb (ub_urb_timeout) */
	int last_pipe;			/* pipe of the last submission, for
					   stall clearing */
	__le32 signature;		/* first nonzero CSW signature seen;
					   later CSWs must match it */
	struct bulk_cb_wrap work_bcb;
	struct bulk_cs_wrap work_bcs;
	struct usb_ctrlrequest work_cr;

	struct work_struct reset_work;	/* runs ub_reset_task() */
	wait_queue_head_t reset_wait;	/* woken when a reset finishes */
};
353
354
355
/* Forward declarations: request handling, the SCSI state machine,
 * reset/recovery, and the synchronous probe-time helpers. */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);
388
389
390
/*
 * Device id table: when libusual is configured, share usb-storage's
 * table; otherwise match any SCSI-transparent Bulk-Only interface.
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  usb_storage_usb_ids
#else

static struct usb_device_id ub_usb_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
	{ }	/* terminator */
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif
403
404
405
406
407
408
409
410
/*
 * Host-number bookkeeping and a small shared pool of queue locks.
 * Everything here is guarded by ub_lock.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];	/* nonzero = host id in use */

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];	/* handed out round-robin */
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);	/* protects the tables above and openc */
419
420
421
422
423
424
425static int ub_id_get(void)
426{
427 unsigned long flags;
428 int i;
429
430 spin_lock_irqsave(&ub_lock, flags);
431 for (i = 0; i < UB_MAX_HOSTS; i++) {
432 if (ub_hostv[i] == 0) {
433 ub_hostv[i] = 1;
434 spin_unlock_irqrestore(&ub_lock, flags);
435 return i;
436 }
437 }
438 spin_unlock_irqrestore(&ub_lock, flags);
439 return -1;
440}
441
442static void ub_id_put(int id)
443{
444 unsigned long flags;
445
446 if (id < 0 || id >= UB_MAX_HOSTS) {
447 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
448 return;
449 }
450
451 spin_lock_irqsave(&ub_lock, flags);
452 if (ub_hostv[id] == 0) {
453 spin_unlock_irqrestore(&ub_lock, flags);
454 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
455 return;
456 }
457 ub_hostv[id] = 0;
458 spin_unlock_irqrestore(&ub_lock, flags);
459}
460
461
462
463
464
465
466
467static spinlock_t *ub_next_lock(void)
468{
469 unsigned long flags;
470 spinlock_t *ret;
471
472 spin_lock_irqsave(&ub_lock, flags);
473 ret = &ub_qlockv[ub_qlock_next];
474 ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
475 spin_unlock_irqrestore(&ub_lock, flags);
476 return ret;
477}
478
479
480
481
482
483
484
485
486
487static void ub_put(struct ub_dev *sc)
488{
489 unsigned long flags;
490
491 spin_lock_irqsave(&ub_lock, flags);
492 --sc->openc;
493 if (sc->openc == 0 && atomic_read(&sc->poison)) {
494 spin_unlock_irqrestore(&ub_lock, flags);
495 ub_cleanup(sc);
496 } else {
497 spin_unlock_irqrestore(&ub_lock, flags);
498 }
499}
500
501
502
503
/*
 * Final cleanup, reached from ub_put() once the device is poisoned and
 * the last opener is gone.  Destroys every LUN's queue and disk, then
 * drops the USB references and frees the softc.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	struct request_queue *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* Defensive: a LUN should always have a queue by now. */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);

		/*
		 * Drop the gendisk reference.  NOTE(review): the disk is
		 * presumably del_gendisk'ed on the disconnect path, which
		 * is outside this chunk — confirm before relying on it.
		 */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	usb_set_intfdata(sc->intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
}
539
540
541
542
543static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
544{
545 struct ub_scsi_cmd *ret;
546
547 if (lun->cmda[0])
548 return NULL;
549 ret = &lun->cmdv[0];
550 lun->cmda[0] = 1;
551 return ret;
552}
553
554static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
555{
556 if (cmd != &lun->cmdv[0]) {
557 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
558 lun->name, cmd);
559 return;
560 }
561 if (!lun->cmda[0]) {
562 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
563 return;
564 }
565 lun->cmda[0] = 0;
566}
567
568
569
570
571static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
572{
573 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
574
575 if (t->qlen++ == 0) {
576 t->head = cmd;
577 t->tail = cmd;
578 } else {
579 t->tail->next = cmd;
580 t->tail = cmd;
581 }
582
583 if (t->qlen > t->qmax)
584 t->qmax = t->qlen;
585}
586
587static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
588{
589 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
590
591 if (t->qlen++ == 0) {
592 t->head = cmd;
593 t->tail = cmd;
594 } else {
595 cmd->next = t->head;
596 t->head = cmd;
597 }
598
599 if (t->qlen > t->qmax)
600 t->qmax = t->qlen;
601}
602
603static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
604{
605 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
606 struct ub_scsi_cmd *cmd;
607
608 if (t->qlen == 0)
609 return NULL;
610 if (--t->qlen == 0)
611 t->tail = NULL;
612 cmd = t->head;
613 t->head = cmd->next;
614 cmd->next = NULL;
615 return cmd;
616}
617
618#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
619
620
621
622
623
624static void ub_request_fn(struct request_queue *q)
625{
626 struct ub_lun *lun = q->queuedata;
627 struct request *rq;
628
629 while ((rq = blk_peek_request(q)) != NULL) {
630 if (ub_request_fn_1(lun, rq) != 0) {
631 blk_stop_queue(q);
632 break;
633 }
634 }
635}
636
/*
 * Start one request from the block layer on @lun.
 * Returns 0 when the request was consumed (started, or completed with
 * an error), -1 when the caller must stop the queue (request already in
 * flight, or no free command slot).
 * NOTE(review): as a request_fn callee this should run under the queue
 * lock — confirm against the queue setup code (not in this chunk).
 */
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	struct ub_request *urq;
	int n_elem;

	/* Device gone: fail with a transport error. */
	if (atomic_read(&sc->poison)) {
		blk_start_request(rq);
		ub_end_rq(rq, DID_NO_CONNECT << 16);
		return 0;
	}

	/* Media changed: fail filesystem requests, but let packet (SG_IO)
	 * commands through so userland can revalidate. */
	if (lun->changed && !blk_pc_request(rq)) {
		blk_start_request(rq);
		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
		return 0;
	}

	/* Only one request per LUN is ever in flight. */
	if (lun->urq.rq != NULL)
		return -1;
	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));

	blk_start_request(rq);

	urq = &lun->urq;
	memset(urq, 0, sizeof(struct ub_request));
	urq->rq = rq;

	/*
	 * Build the scatterlist from the block layer.
	 */
	sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
	if (n_elem < 0) {
		/* Not expected from blk_rq_map_sg, but handled anyway. */
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    lun->name, n_elem);
		goto drop;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    lun->name, n_elem);
		goto drop;
	}
	urq->nsg = n_elem;

	if (blk_pc_request(rq)) {
		ub_cmd_build_packet(sc, lun, cmd, urq);
	} else {
		ub_cmd_build_block(sc, lun, cmd, urq);
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0)
		goto drop;

	return 0;

drop:
	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, DID_ERROR << 16);
	return 0;
}
707
/*
 * Build a READ(10)/WRITE(10) command for a filesystem request:
 * copy the scatterlist and fill in the 10-byte CDB.
 */
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		cmd->dir = UB_DIR_WRITE;
	else
		cmd->dir = UB_DIR_READ;

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	/*
	 * Convert the block layer's 512-byte sector units into device
	 * blocks via capacity.bshift (0 when the device block size is
	 * 512, per the defaults in ub_revalidate()).
	 */
	block = blk_rq_pos(rq) >> lun->capacity.bshift;
	nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;

	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte CDB: big-endian 32-bit LBA in bytes 2-5, 16-bit count in 7-8 */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = blk_rq_bytes(rq);
}
743
/*
 * Build a command from a packet (SG_IO/SCSI passthrough) request:
 * the CDB comes from userland via rq->cmd, verbatim.
 */
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;

	if (blk_rq_bytes(rq) == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = blk_rq_bytes(rq);

	/*
	 * Honor the block layer's per-request timeout; when zero, the
	 * UB_DATA_TIMEOUT/UB_STAT_TIMEOUT defaults apply instead
	 * (see ub_data_start()/__ub_state_stat()).
	 */
	cmd->timeo = rq->timeout;
}
772
/*
 * Completion callback for block-layer commands: translate the command
 * outcome into a SCSI status for ub_end_rq(), then restart the queue.
 * May requeue the command instead (via ub_rw_cmd_retry) and return
 * early without completing the request.
 */
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_lun *lun = cmd->lun;
	struct ub_request *urq = cmd->back;
	struct request *rq;
	unsigned int scsi_status;

	rq = urq->rq;

	if (cmd->error == 0) {
		if (blk_pc_request(rq)) {
			/* Packet command: report the residual to userland. */
			if (cmd->act_len >= rq->resid_len)
				rq->resid_len = 0;
			else
				rq->resid_len -= cmd->act_len;
			scsi_status = 0;
		} else {
			/* Filesystem request: a short transfer is an error. */
			if (cmd->act_len != cmd->len) {
				scsi_status = SAM_STAT_CHECK_CONDITION;
			} else {
				scsi_status = 0;
			}
		}
	} else {
		if (blk_pc_request(rq)) {
			/* Hand the sense data back to userland. */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				scsi_status = SAM_STAT_CHECK_CONDITION;
			else
				scsi_status = DID_ERROR << 16;
		} else {
			/*
			 * Transport errors with no sense, a medium error,
			 * or a unit attention are worth retrying; a
			 * successful retry requeues and returns early.
			 */
			if (cmd->error == -EIO &&
			    (cmd->key == 0 ||
			     cmd->key == MEDIUM_ERROR ||
			     cmd->key == UNIT_ATTENTION)) {
				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
					return;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
		}
	}

	urq->rq = NULL;

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, scsi_status);
	blk_start_queue(lun->disk->queue);
}
823
824static void ub_end_rq(struct request *rq, unsigned int scsi_status)
825{
826 int error;
827
828 if (scsi_status == 0) {
829 error = 0;
830 } else {
831 error = -EIO;
832 rq->errors = scsi_status;
833 }
834 __blk_end_request_all(rq, error);
835}
836
/*
 * Retry a failed block request: kick off a reset, rebuild the command
 * and requeue it.  Returns 0 when requeued (the caller must not touch
 * the request further), -ENXIO when the device is gone, or -EIO when
 * out of retries.
 */
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

	if (atomic_read(&sc->poison))
		return -ENXIO;

	/* No-op if a reset is already pending (see ub_reset_enter). */
	ub_reset_enter(sc, urq->current_try);

	if (urq->current_try >= 3)
		return -EIO;
	urq->current_try++;

	/* Remove this if anyone complains of flooding. */
	printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
	    "[sense %x %02x %02x] retry %d\n",
	    sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
	    cmd->key, cmd->asc, cmd->ascq, urq->current_try);

	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	ub_cmd_build_block(sc, lun, cmd, urq);

	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;

#if 0
	return ub_submit_scsi(sc, cmd);
#else
	/*
	 * Queue without kicking the tasklet: ub_reset_task() schedules
	 * it once the reset completes, and ub_scsi_dispatch() parks
	 * while sc->reset is set anyway.
	 */
	ub_cmdq_add(sc, cmd);
	return 0;
#endif
}
873
874
875
876
877
878
879
880
881
882
883static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
884{
885
886 if (cmd->state != UB_CMDST_INIT ||
887 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
888 return -EINVAL;
889 }
890
891 ub_cmdq_add(sc, cmd);
892
893
894
895
896 tasklet_schedule(&sc->tasklet);
897 return 0;
898}
899
900
901
902
903
/*
 * Start the first URB of a command: build the Bulk-Only CBW and submit
 * it on the bulk-out pipe.  Called under sc->lock from the tasklet.
 * Returns 0 on success or the usb_submit_urb() error.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * Clear the sense buffer so that stale sense data from a prior
	 * command is never mistaken for this command's.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* compared verbatim against the CSW
					   Tag in ub_scsi_urb_compl(), so
					   byte order does not matter */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;	/* bit 7: IN */
	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* Mark done so the dispatcher does not wait on this URB. */
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_CMD;
	return 0;
}
951
952
953
954
/*
 * Timeout handler for the in-flight work URB.  Takes sc->lock to
 * serialize against the tasklet and only unlinks the URB if it has not
 * completed yet; the unlink completion then drives the state machine.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	if (!ub_is_completed(&sc->work_done))
		usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(sc->lock, flags);
}
965
966
967
968
969
970
971
972
/*
 * URB completion callback (interrupt context).  All real processing is
 * deferred to the tasklet; here we only flag completion and kick it.
 * The work timer is intentionally not deleted here — ub_scsi_dispatch()
 * does that after observing work_done.
 */
static void ub_urb_complete(struct urb *urb)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}
980
/* Tasklet body: run the dispatcher under the per-device queue lock. */
static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(sc->lock, flags);
}
990
/*
 * Drain the command queue: complete DONE commands, launch INIT ones,
 * and otherwise advance the head command through the state machine once
 * its URB has finished.  The loop parks while a reset is pending;
 * ub_reset_task() reschedules the tasklet when it clears sc->reset.
 * Called under sc->lock.
 */
static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
				break;	/* URB in flight; wait for completion */
			cmd->error = rc;
			cmd->state = UB_CMDST_DONE;	/* completed next pass */
		} else {
			if (!ub_is_completed(&sc->work_done))
				break;	/* URB still in flight */
			del_timer(&sc->work_timer);
			ub_scsi_urb_compl(sc, cmd);
		}
	}
}
1013
/*
 * The heart of the driver: advance @cmd through the Bulk-Only protocol
 * after its current URB finished.  Runs from the tasklet under sc->lock
 * after ub_scsi_dispatch() has deleted the work timer.  Unrecoverable
 * failures funnel through Bad_End, which completes the command with -EIO.
 */
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct urb *urb = &sc->work_urb;
	struct bulk_cs_wrap *bcs;
	int endp;
	int len;
	int rc;

	if (atomic_read(&sc->poison)) {
		ub_state_done(sc, cmd, -ENODEV);
		return;
	}

	/* Rebuild the endpoint address of the last transfer, for the
	 * endpoint-reset calls below. */
	endp = usb_pipeendpoint(sc->last_pipe);
	if (usb_pipein(sc->last_pipe))
		endp |= USB_DIR_IN;

	if (cmd->state == UB_CMDST_CLEAR) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL: a stalled control
			 * pipe leaves us no way to recover here.
			 */
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * The clear itself is not checked further; reset the
		 * endpoint's toggle state and go fetch sense data.
		 */
		usb_reset_endpoint(sc->dev, endp);

		ub_state_sense(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLR2STS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		usb_reset_endpoint(sc->dev, endp);

		/* Data-stage stall cleared; go read the CSW. */
		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLRRS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		usb_reset_endpoint(sc->dev, endp);

		/* Status-stage stall cleared; retry the CSW (counted). */
		ub_state_stat_counted(sc, cmd);

	} else if (cmd->state == UB_CMDST_CMD) {
		switch (urb->status) {
		case 0:
			break;
		case -EOVERFLOW:
			goto Bad_End;
		case -EPIPE:
			/* Bulk-out stalled on the CBW: clear it first. */
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				/*
				 * Cannot even submit the clear; finish the
				 * command rather than wedging the queue.
				 */
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLEAR;
			return;
		case -ESHUTDOWN:	/* device went away */
		case -EILSEQ:
			ub_state_done(sc, cmd, -ENODEV);
			return;
		default:
			goto Bad_End;
		}
		/* A short CBW write means the device is confused. */
		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
			goto Bad_End;
		}

		/* No data stage: go straight to the CSW. */
		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
			ub_state_stat(sc, cmd);
			return;
		}

		ub_data_start(sc, cmd);

	} else if (cmd->state == UB_CMDST_DATA) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLR2STS;
			return;
		}
		if (urb->status == -EOVERFLOW) {
			/*
			 * Data overran (babble).  Flag the failure but
			 * still collect the CSW to stay in phase.
			 */
			cmd->error = -EOVERFLOW;
			ub_state_stat(sc, cmd);
			return;
		}

		if (cmd->dir == UB_DIR_WRITE) {
			/*
			 * Short or failed write: record what went out,
			 * mark the command failed, and proceed to the CSW.
			 */
			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				cmd->act_len += len;

				cmd->error = -EIO;
				ub_state_stat(sc, cmd);
				return;
			}

		} else {
			/*
			 * Reads tolerate short transfers, except when the
			 * received length looks like a stray 13-byte CSW
			 * landed on the data pipe — then the transfer
			 * phases are out of sync and we give up.
			 */
			if (urb->status != 0)
				cmd->error = -EIO;

			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
					goto Bad_End;
			}
		}

		cmd->act_len += urb->actual_length;

		/* More scatter-gather segments to transfer? */
		if (++cmd->current_sg < cmd->nsg) {
			ub_data_start(sc, cmd);
			return;
		}
		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_STAT) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}

			/*
			 * A stall on the status pipe taints the command:
			 * mark it failed before re-reading the CSW.
			 */
			cmd->error = -EIO;

			cmd->state = UB_CMDST_CLRRS;
			return;
		}

		/* Catch everything, including -EOVERFLOW and other nasties. */
		if (urb->status != 0)
			goto Bad_End;

		if (urb->actual_length == 0) {
			/* Empty CSW read: retry a bounded number of times. */
			ub_state_stat_counted(sc, cmd);
			return;
		}

		/*
		 * Validate the CSW.  Broken devices may return garbage;
		 * the signature is learned from the first nonzero one we
		 * see, and later mismatches cause a counted re-read.
		 */
		bcs = &sc->work_bcs;

		if (sc->signature == cpu_to_le32(0)) {
			/*
			 * No signature learned yet; adopt whatever this
			 * CSW carries (zero means keep re-reading).
			 */
			sc->signature = bcs->Signature;
			if (sc->signature == cpu_to_le32(0)) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		} else {
			if (bcs->Signature != sc->signature) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		}

		if (bcs->Tag != cmd->tag) {
			/*
			 * Tag mismatch — most likely a stale CSW from an
			 * earlier, aborted command; re-read (counted)
			 * rather than failing outright.
			 */
			ub_state_stat_counted(sc, cmd);
			return;
		}

		if (!sc->bad_resid) {
			len = le32_to_cpu(bcs->Residue);
			if (len != cmd->len - cmd->act_len) {
				/*
				 * Only start ignoring residues once we
				 * have positive proof they are bogus: a
				 * fully completed transfer reported with
				 * a nonzero residue.
				 */
				if (cmd->len == cmd->act_len) {
					printk(KERN_NOTICE "%s: "
					    "bad residual %d of %d, ignoring\n",
					    sc->name, len, cmd->len);
					sc->bad_resid = 1;
				}
			}
		}

		switch (bcs->Status) {
		case US_BULK_STAT_OK:
			break;
		case US_BULK_STAT_FAIL:
			ub_state_sense(sc, cmd);	/* fetch sense data */
			return;
		case US_BULK_STAT_PHASE:
			goto Bad_End;
		default:
			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
			    sc->name, bcs->Status);
			ub_state_done(sc, cmd, -EINVAL);
			return;
		}

		/* cmd->error is deliberately not cleared here, so that a
		 * babble noticed in the data stage is preserved. */
		if (cmd->error != 0) {
			ub_state_sense(sc, cmd);
			return;
		}
		cmd->state = UB_CMDST_DONE;
		ub_cmdq_pop(sc);
		(*cmd->done)(sc, cmd);

	} else if (cmd->state == UB_CMDST_SENSE) {
		/* The sense fetch itself failed; give up on the command. */
		ub_state_done(sc, cmd, -EIO);

	} else {
		printk(KERN_WARNING "%s: wrong command state %d\n",
		    sc->name, cmd->state);
		ub_state_done(sc, cmd, -EINVAL);
		return;
	}
	return;

Bad_End:
	ub_state_done(sc, cmd, -EIO);
}
1308
1309
1310
1311
1312
/*
 * Submit the data URB for the current scatter-gather segment.
 * Uses sg_virt(), so segments are assumed to be in mapped kernel
 * memory.  On submit failure the command is completed with the error.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
	int pipe;
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	if (cmd->dir == UB_DIR_READ)
		pipe = sc->recv_bulk_pipe;
	else
		pipe = sc->send_bulk_pipe;
	sc->last_pipe = pipe;	/* remembered for stall clearing */
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
	    sg->length, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* Mark done so the dispatcher does not wait on this URB. */
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return;
	}

	/* Per-request timeout from the block layer overrides the default. */
	if (cmd->timeo)
		sc->work_timer.expires = jiffies + cmd->timeo;
	else
		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_DATA;
}
1344
1345
1346
1347
1348
/*
 * Terminate the head command with error @rc: mark it DONE, pop it off
 * the queue, and invoke its completion callback.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

	cmd->error = rc;
	cmd->state = UB_CMDST_DONE;
	ub_cmdq_pop(sc);
	(*cmd->done)(sc, cmd);
}
1357
1358
1359
1360
1361
/*
 * Submit the CSW read on the bulk-in pipe.  Returns 0 on success.
 * On submit failure the command is already completed with the error and
 * -1 is returned — the caller must not touch @cmd afterwards.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->recv_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* Mark done so the dispatcher does not wait on this URB. */
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return -1;
	}

	/* Per-request timeout from the block layer overrides the default. */
	if (cmd->timeo)
		sc->work_timer.expires = jiffies + cmd->timeo;
	else
		sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}
1386
1387
1388
1389
1390
1391static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1392{
1393
1394 if (__ub_state_stat(sc, cmd) != 0)
1395 return;
1396
1397 cmd->stat_count = 0;
1398 cmd->state = UB_CMDST_STAT;
1399}
1400
1401
1402
1403
1404
1405static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1406{
1407
1408 if (++cmd->stat_count >= 4) {
1409 ub_state_sense(sc, cmd);
1410 return;
1411 }
1412
1413 if (__ub_state_stat(sc, cmd) != 0)
1414 return;
1415
1416 cmd->state = UB_CMDST_STAT;
1417}
1418
1419
1420
1421
1422
/*
 * Kick off an automatic REQUEST SENSE for @cmd: build the 6-byte sense
 * command targeting sc->top_sense and insert it at the head of the
 * queue so it runs next.  @cmd itself parks in UB_CMDST_SENSE until
 * ub_top_sense_done() re-enters the state machine for it.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd *scmd;
	struct scatterlist *sg;
	int rc;

	/* Guard against sensing a failed REQUEST SENSE — no recursion. */
	if (cmd->cdb[0] == REQUEST_SENSE) {
		rc = -EPIPE;
		goto error;
	}

	scmd = &sc->top_rqs_cmd;
	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
	scmd->cdb[0] = REQUEST_SENSE;
	scmd->cdb[4] = UB_SENSE_SIZE;	/* allocation length */
	scmd->cdb_len = 6;
	scmd->dir = UB_DIR_READ;
	scmd->state = UB_CMDST_INIT;
	scmd->nsg = 1;
	sg = &scmd->sgv[0];
	sg_init_table(sg, UB_MAX_REQ_SG);
	sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
	    (unsigned long)sc->top_sense & (PAGE_SIZE-1));
	scmd->len = UB_SENSE_SIZE;
	scmd->lun = cmd->lun;
	scmd->done = ub_top_sense_done;
	scmd->back = cmd;	/* the command being sensed */

	scmd->tag = sc->tagcnt++;

	cmd->state = UB_CMDST_SENSE;

	ub_cmdq_insert(sc, scmd);
	return;

error:
	ub_state_done(sc, cmd, rc);
}
1461
1462
1463
1464
1465
/*
 * Submit a CLEAR_FEATURE(ENDPOINT_HALT) control request for the given
 * stalled pipe, reusing the single work URB.  Returns 0 on success or
 * the usb_submit_urb() error; the state machine resumes when the
 * control transfer completes.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	int rc;

	/* Build the endpoint address (direction bit included for IN). */
	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein (stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	UB_INIT_COMPLETION(sc->work_done);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}
1498
1499
1500
/*
 * Completion of the auto REQUEST SENSE command.  After sanity-checking
 * that the queue head is still the command we ran sense for, copy the
 * key/ASC/ASCQ out of the fixed-format sense buffer and re-enter the
 * state machine for the original command (parked in UB_CMDST_SENSE).
 */
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
	unsigned char *sense = sc->top_sense;
	struct ub_scsi_cmd *cmd;

	/*
	 * Find the command which triggered the check condition; it must
	 * still be at the head of the queue and in the SENSE state.
	 */
	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
		return;
	}
	if (cmd != scmd->back) {
		printk(KERN_WARNING "%s: "
		    "sense done for wrong command 0x%x\n",
		    sc->name, cmd->tag);
		return;
	}
	if (cmd->state != UB_CMDST_SENSE) {
		printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
		    sc->name, cmd->state);
		return;
	}

	/*
	 * Fixed-format sense: byte 2 low nibble = key, 12 = ASC, 13 = ASCQ.
	 */
	cmd->key = sense[2] & 0x0F;
	cmd->asc = sense[12];
	cmd->ascq = sense[13];

	ub_scsi_urb_compl(sc, cmd);
}
1535
1536
1537
1538
1539
/*
 * Arrange for a reset.  sc->reset is set to @try + 1, so it is nonzero
 * while a reset is pending (which parks ub_scsi_dispatch()); its low
 * bit selects the reset flavor in ub_reset_task().  A second call while
 * one is pending is a no-op.
 */
static void ub_reset_enter(struct ub_dev *sc, int try)
{

	if (sc->reset) {
		/* A reset is already pending; nothing to do. */
		return;
	}
	sc->reset = try + 1;

#if 0 /* kept disabled from the original implementation */
	unsigned long flags;
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);
#endif

#if 0 /* kept disabled from the original implementation */
	struct ub_lun *lun;
	list_for_each_entry(lun, &sc->luns, link) {
		blk_stop_queue(lun->disk->queue);
	}
#endif

	schedule_work(&sc->reset_work);
}
1565
1566static void ub_reset_task(struct work_struct *work)
1567{
1568 struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1569 unsigned long flags;
1570 struct ub_lun *lun;
1571 int rc;
1572
1573 if (!sc->reset) {
1574 printk(KERN_WARNING "%s: Running reset unrequested\n",
1575 sc->name);
1576 return;
1577 }
1578
1579 if (atomic_read(&sc->poison)) {
1580 ;
1581 } else if ((sc->reset & 1) == 0) {
1582 ub_sync_reset(sc);
1583 msleep(700);
1584 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1585 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1586 } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1587 ;
1588 } else {
1589 rc = usb_lock_device_for_reset(sc->dev, sc->intf);
1590 if (rc < 0) {
1591 printk(KERN_NOTICE
1592 "%s: usb_lock_device_for_reset failed (%d)\n",
1593 sc->name, rc);
1594 } else {
1595 rc = usb_reset_device(sc->dev);
1596 if (rc < 0) {
1597 printk(KERN_NOTICE "%s: "
1598 "usb_lock_device_for_reset failed (%d)\n",
1599 sc->name, rc);
1600 }
1601 usb_unlock_device(sc->dev);
1602 }
1603 }
1604
1605
1606
1607
1608
1609
1610
1611 spin_lock_irqsave(sc->lock, flags);
1612 sc->reset = 0;
1613 tasklet_schedule(&sc->tasklet);
1614 list_for_each_entry(lun, &sc->luns, link) {
1615 blk_start_queue(lun->disk->queue);
1616 }
1617 wake_up(&sc->reset_wait);
1618 spin_unlock_irqrestore(sc->lock, flags);
1619}
1620
1621
1622
1623
1624
1625
1626
1627
1628
/*
 * pre_reset hook for usb_reset_device(): no driver-side preparation is
 * required, so simply report success.
 */
static int ub_pre_reset(struct usb_interface *iface)
{
	return 0;
}
1632
/*
 * post_reset hook for usb_reset_device(): nothing needs to be restored
 * afterwards, so simply report success.
 */
static int ub_post_reset(struct usb_interface *iface)
{
	return 0;
}
1636
1637
1638
1639
1640static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1641{
1642
1643 lun->readonly = 0;
1644
1645 lun->capacity.nsec = 0;
1646 lun->capacity.bsize = 512;
1647 lun->capacity.bshift = 0;
1648
1649 if (ub_sync_tur(sc, lun) != 0)
1650 return;
1651 lun->changed = 0;
1652
1653 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1654
1655
1656
1657
1658
1659 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1660 lun->capacity.nsec = 0;
1661 lun->capacity.bsize = 512;
1662 lun->capacity.bshift = 0;
1663 }
1664 }
1665}
1666
1667
1668
1669
1670
1671
/*
 * Block device open.
 *
 * Takes a reference on the ub_dev by bumping openc under ub_lock (the
 * error path and ub_bd_release() drop it via ub_put()), refuses devices
 * poisoned by disconnect, and enforces medium-present and read-only
 * policy for removable/read-only LUNs.
 */
static int ub_bd_open(struct block_device *bdev, fmode_t mode)
{
	struct ub_lun *lun = bdev->bd_disk->private_data;
	struct ub_dev *sc = lun->udev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		/* Device was disconnected; refuse new opens. */
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;	/* reference released by ub_put() */
	spin_unlock_irqrestore(&ub_lock, flags);

	if (lun->removable || lun->readonly)
		check_disk_change(bdev);

	/*
	 * Opening with FMODE_NDELAY is allowed even without a medium,
	 * so that tools can issue ioctls to an empty drive; a normal
	 * open of a changed/empty removable medium fails.
	 */
	if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
		rc = -ENOMEDIUM;
		goto err_open;
	}

	if (lun->readonly && (mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	ub_put(sc);	/* undo the openc reference taken above */
	return rc;
}
1711
1712
1713
1714static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1715{
1716 struct ub_lun *lun = disk->private_data;
1717 struct ub_dev *sc = lun->udev;
1718
1719 ub_put(sc);
1720 return 0;
1721}
1722
1723
1724
1725
1726static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1727 unsigned int cmd, unsigned long arg)
1728{
1729 struct gendisk *disk = bdev->bd_disk;
1730 void __user *usermem = (void __user *) arg;
1731
1732 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1733}
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
/*
 * Revalidate the disk: re-read medium state via ub_revalidate() and
 * propagate the resulting geometry (logical block size and capacity in
 * 512-byte sectors) to the block layer.  Always reports success; a
 * missing medium simply yields a zero capacity.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	ub_revalidate(lun->udev, lun);

	/* XXX Support sector size switching like in sr.c */
	blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
	set_capacity(disk, lun->capacity.nsec);
	/* set_disk_ro(disk, lun->readonly); -- not wired up; readonly is
	 * only enforced at open time.  TODO confirm this is intentional. */
	return 0;
}
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768static int ub_bd_media_changed(struct gendisk *disk)
1769{
1770 struct ub_lun *lun = disk->private_data;
1771
1772 if (!lun->removable)
1773 return 0;
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784 if (ub_sync_tur(lun->udev, lun) != 0) {
1785 lun->changed = 1;
1786 return 1;
1787 }
1788
1789 return lun->changed;
1790}
1791
/* Block device operations exposed to the block layer for every LUN. */
static const struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_open,
	.release	= ub_bd_release,
	.locked_ioctl	= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};
1800
1801
1802
1803
1804static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1805{
1806 struct completion *cop = cmd->back;
1807 complete(cop);
1808}
1809
1810
1811
1812
/*
 * Synchronously issue TEST UNIT READY to one LUN and wait for the state
 * machine to finish it.
 *
 * Returns 0 when the unit is ready, -ENOMEM on allocation failure, a
 * negative submit/command error otherwise — except that a failed command
 * carrying sense data is reported as the (positive) sense key, which
 * callers like ub_probe() test against specific keys.
 */
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
	struct ub_scsi_cmd *cmd;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
	unsigned long flags;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;

	cmd->cdb[0] = TEST_UNIT_READY;
	cmd->cdb_len = 6;
	cmd->dir = UB_DIR_NONE;
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;			/* may be NULL during early probe */
	cmd->done = ub_probe_done;	/* wakes us via the completion */
	cmd->back = &compl;

	/* Tag allocation and submission must happen under sc->lock. */
	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	rc = cmd->error;

	/* Surface the sense key (positive) instead of a bare -EIO. */
	if (rc == -EIO && cmd->key != 0)
		rc = cmd->key;

err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}
1856
1857
1858
1859
/*
 * Synchronously read the LUN's capacity with READ CAPACITY(10)
 * (opcode 0x25) and parse the 8-byte big-endian response:
 * last LBA followed by the block size in bytes.
 *
 * On success fills *ret with bsize (bytes per device block), bshift
 * (log2 of bsize/512) and nsec converted to 512-byte sectors.
 * Returns 0, -ENOMEM, -EIO on command failure or short read, or
 * -EDOM for a block size other than 512/1024/2048/4096.
 */
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret)
{
	struct ub_scsi_cmd *cmd;
	struct scatterlist *sg;
	char *p;
	/* One allocation holds the command plus the 8-byte data buffer. */
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
	unsigned long flags;
	unsigned int bsize, shift;
	unsigned long nsec;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	p = (char *)cmd + sizeof(struct ub_scsi_cmd);	/* data buffer */

	cmd->cdb[0] = 0x25;		/* READ CAPACITY(10) */
	cmd->cdb_len = 10;
	cmd->dir = UB_DIR_READ;
	cmd->state = UB_CMDST_INIT;
	cmd->nsg = 1;			/* single scatterlist entry */
	sg = &cmd->sgv[0];
	sg_init_table(sg, UB_MAX_REQ_SG);
	sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
	cmd->len = 8;
	cmd->lun = lun;
	cmd->done = ub_probe_done;	/* wakes us via the completion */
	cmd->back = &compl;

	/* Tag allocation and submission must happen under sc->lock. */
	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	if (cmd->error != 0) {
		rc = -EIO;
		goto err_read;
	}
	if (cmd->act_len != 8) {
		/* Short transfer: response is unusable. */
		rc = -EIO;
		goto err_read;
	}

	/* Big-endian: last LBA (+1 gives block count), then block size. */
	nsec = be32_to_cpu(*(__be32 *)p) + 1;
	bsize = be32_to_cpu(*(__be32 *)(p + 4));
	switch (bsize) {
	case 512:	shift = 0;	break;
	case 1024:	shift = 1;	break;
	case 2048:	shift = 2;	break;
	case 4096:	shift = 3;	break;
	default:
		rc = -EDOM;
		goto err_inv_bsize;
	}

	ret->bsize = bsize;
	ret->bshift = shift;
	ret->nsec = nsec << shift;	/* device blocks -> 512-byte sectors */
	rc = 0;

err_inv_bsize:
err_read:
err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}
1938
1939
1940
1941static void ub_probe_urb_complete(struct urb *urb)
1942{
1943 struct completion *cop = urb->context;
1944 complete(cop);
1945}
1946
/* Guard-timer handler: unblock the waiter so a dead URB can be killed. */
static void ub_probe_timeout(unsigned long arg)
{
	complete((struct completion *) arg);
}
1952
1953
1954
1955
/*
 * Issue a Bulk-Only Mass Storage Reset (class request 0xff) on the
 * default control pipe and wait for it, guarded by a UB_CTRL_TIMEOUT
 * timer so a wedged device cannot block us forever.
 *
 * NOTE(review): reuses sc->work_cr/sc->work_urb — presumably safe here
 * because the state machine is quiesced during reset; confirm callers.
 *
 * Returns the submit error, or the final URB status (0 on success).
 */
static int ub_sync_reset(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	cr = &sc->work_cr;
	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_RESET_REQUEST;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
		return rc;
	}

	/* Timeout guard: the timer completes the same completion. */
	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);	/* harmless if already finished */

	return sc->work_urb.status;
}
1995
1996
1997
1998
/*
 * Issue Get Max LUN (class request 0xfe) and return the LUN count.
 *
 * The 1-byte buffer is pre-filled with the sentinel 55 so a device that
 * ACKs the request without actually writing data is detected and treated
 * as "unknown".  Returns: number of LUNs (device's max LUN + 1, clamped
 * to UB_MAX_LUNS), 0 when the answer was unusable (caller retries or
 * defaults to 1), or a negative error.
 */
static int ub_sync_getmaxlun(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	unsigned char *p;
	enum { ALLOC_SIZE = 1 };
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int nluns;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	*p = 55;	/* sentinel: detect an ACK with no data written */

	cr = &sc->work_cr;
	cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_GET_MAX_LUN;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(1);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
	    (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
		goto err_submit;

	/* Timeout guard: the timer completes the same completion. */
	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	if ((rc = sc->work_urb.status) < 0)
		goto err_io;

	if (sc->work_urb.actual_length != 1) {
		nluns = 0;	/* no byte transferred: unknown */
	} else {
		if ((nluns = *p) == 55) {
			nluns = 0;	/* buffer untouched: unknown */
		} else {
  			/* GetMaxLUN returns the maximum LUN number */
			nluns += 1;
			if (nluns > UB_MAX_LUNS)
				nluns = UB_MAX_LUNS;
		}
	}

	kfree(p);
	return nluns;

err_io:
err_submit:
	kfree(p);
err_alloc:
	return rc;
}
2066
2067
2068
2069
/*
 * Clear a halted (stalled) endpoint with CLEAR_FEATURE(ENDPOINT_HALT)
 * on the default control pipe, guarded by a UB_CTRL_TIMEOUT timer, then
 * reset the host-side endpoint state with usb_reset_endpoint().
 *
 * Note: returns 0 once the request was submitted, regardless of whether
 * the control transfer itself succeeded; only a submit failure is
 * reported to the caller.
 */
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	/* Build the wIndex endpoint address: number plus direction bit. */
	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein (stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
		return rc;
	}

	/* Timeout guard: the timer completes the same completion. */
	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	usb_reset_endpoint(sc->dev, endp);

	return 0;
}
2115
2116
2117
2118
2119static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2120 struct usb_interface *intf)
2121{
2122 struct usb_host_interface *altsetting = intf->cur_altsetting;
2123 struct usb_endpoint_descriptor *ep_in = NULL;
2124 struct usb_endpoint_descriptor *ep_out = NULL;
2125 struct usb_endpoint_descriptor *ep;
2126 int i;
2127
2128
2129
2130
2131
2132
2133 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2134 ep = &altsetting->endpoint[i].desc;
2135
2136
2137 if (usb_endpoint_xfer_bulk(ep)) {
2138
2139 if (usb_endpoint_dir_in(ep)) {
2140 if (ep_in == NULL)
2141 ep_in = ep;
2142 } else {
2143 if (ep_out == NULL)
2144 ep_out = ep;
2145 }
2146 }
2147 }
2148
2149 if (ep_in == NULL || ep_out == NULL) {
2150 printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
2151 return -ENODEV;
2152 }
2153
2154
2155 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2156 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2157 sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2158 usb_endpoint_num(ep_out));
2159 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
2160 usb_endpoint_num(ep_in));
2161
2162 return 0;
2163}
2164
2165
2166
2167
2168
/*
 * USB probe: allocate and initialize a ub_dev for the interface, find
 * its bulk pipes, wait for the unit to become ready, query the LUN
 * count, and register a gendisk for every LUN.
 */
static int ub_probe(struct usb_interface *intf,
    const struct usb_device_id *dev_id)
{
	struct ub_dev *sc;
	int nluns;
	int rc;
	int i;

	/* Defer to usb-storage if this ID is not configured for ub. */
	if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
		return -ENXIO;

	rc = -ENOMEM;
	if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
		goto err_core;
	sc->lock = ub_next_lock();
	INIT_LIST_HEAD(&sc->luns);
	usb_init_urb(&sc->work_urb);
	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
	atomic_set(&sc->poison, 0);
	INIT_WORK(&sc->reset_work, ub_reset_task);
	init_waitqueue_head(&sc->reset_wait);

	init_timer(&sc->work_timer);
	sc->work_timer.data = (unsigned long) sc;
	sc->work_timer.function = ub_urb_timeout;

	/* Mark the work completion "done" so the first command may run. */
	ub_init_completion(&sc->work_done);
	sc->work_done.done = 1;

	sc->dev = interface_to_usbdev(intf);
	sc->intf = intf;

	usb_set_intfdata(intf, sc);
	usb_get_dev(sc->dev);

	/* Hold the interface too; both references drop on teardown. */
	usb_get_intf(sc->intf);

	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
	    sc->dev->bus->busnum, sc->dev->devnum);

	/* XXX Verify that we can handle the device (from descriptors) */

	if (ub_get_pipes(sc, sc->dev, intf) != 0)
		goto err_dev_desc;

#if 0 /* disabled: clearing stalls at probe time proved unnecessary */
	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
	ub_probe_clear_stall(sc, sc->send_bulk_pipe);
#endif

	/*
	 * Give the unit a few chances to become ready; 0x6 is the
	 * UNIT ATTENTION sense key, which is expected right after
	 * power-on/reset and is worth retrying through.
	 */
	for (i = 0; i < 3; i++) {
		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
		if (rc != 0x6) break;
		msleep(10);
	}

	/* Ask for the LUN count; default to a single LUN on no answer. */
	nluns = 1;
	for (i = 0; i < 3; i++) {
		if ((rc = ub_sync_getmaxlun(sc)) < 0)
			break;
		if (rc != 0) {
			nluns = rc;
			break;
		}
		msleep(100);
	}

	for (i = 0; i < nluns; i++) {
		ub_probe_lun(sc, i);	/* per-LUN failures are tolerated */
	}
	return 0;

err_dev_desc:
	usb_set_intfdata(intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
err_core:
	return rc;
}
2279
/*
 * Create and register one LUN: allocate the ub_lun, reserve a drive
 * letter (ub_id_get), read initial geometry, then set up the gendisk
 * and its request queue and add the disk.  Errors unwind in reverse
 * order via the goto chain.
 */
static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
	struct ub_lun *lun;
	struct request_queue *q;
	struct gendisk *disk;
	int rc;

	rc = -ENOMEM;
	if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
		goto err_alloc;
	lun->num = lnum;

	rc = -ENOSR;
	if ((lun->id = ub_id_get()) == -1)
		goto err_id;

	lun->udev = sc;

	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);

	/* Assume removable and changed until revalidation says otherwise. */
	lun->removable = 1;
	lun->changed = 1;		/* ub_revalidate clears */
	ub_revalidate(sc, lun);

	rc = -ENOMEM;
	if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
		goto err_diskalloc;

	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
	disk->major = UB_MAJOR;
	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
	disk->fops = &ub_bd_fops;
	disk->private_data = lun;
	disk->driverfs_dev = &sc->intf->dev;

	rc = -ENOMEM;
	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
		goto err_blkqinit;

	disk->queue = q;

	/* Queue limits matching the driver's scatterlist and URB sizes. */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious, see above */
	blk_queue_max_sectors(q, UB_MAX_SECTORS);
	blk_queue_logical_block_size(q, lun->capacity.bsize);

	lun->disk = disk;
	q->queuedata = lun;
	list_add(&lun->link, &sc->luns);

	set_capacity(disk, lun->capacity.nsec);
	if (lun->removable)
		disk->flags |= GENHD_FL_REMOVABLE;

	add_disk(disk);

	return 0;

err_blkqinit:
	put_disk(disk);
err_diskalloc:
	ub_id_put(lun->id);
err_id:
	kfree(lun);
err_alloc:
	return rc;
}
2350
/*
 * USB disconnect: poison the device, flush pending commands, and tear
 * down the gendisks.  The final structure release happens through
 * ub_put() once the last opener is gone.
 */
static void ub_disconnect(struct usb_interface *intf)
{
	struct ub_dev *sc = usb_get_intfdata(intf);
	struct ub_lun *lun;
	unsigned long flags;

	/*
	 * Pin sc by bumping openc (under ub_lock) so the structure
	 * cannot be freed underneath us; the matching ub_put() is the
	 * last statement of this function.
	 */
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	/*
	 * Mark the device gone: new opens and command submissions will
	 * be refused from this point on.
	 */
	atomic_set(&sc->poison, 1);

	/* Wait for any in-flight reset worker to finish first. */
	wait_event(sc->reset_wait, !sc->reset);

	/*
	 * Drain the command queue under sc->lock, completing every
	 * queued command with -ENOTCONN.  Anything still queued here
	 * slipped in after shutdown and is worth a warning.
	 */
	spin_lock_irqsave(sc->lock, flags);
	{
		struct ub_scsi_cmd *cmd;
		int cnt = 0;
		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
			cmd->error = -ENOTCONN;
			cmd->state = UB_CMDST_DONE;
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
			cnt++;
		}
		if (cnt != 0) {
			printk(KERN_WARNING "%s: "
			    "%d was queued after shutdown\n", sc->name, cnt);
		}
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * Unregister the disks.  The LUN structures themselves are not
	 * freed here; they go away with the ub_dev via ub_put().
	 */
	list_for_each_entry(lun, &sc->luns, link) {
		del_gendisk(lun->disk);
		/*
		 * NOTE(review): the request queues are not cleaned up
		 * here — presumably handled by the final release path;
		 * confirm before changing.
		 */
	}

	/*
	 * Sanity check: after poisoning and draining, the work URB
	 * should no longer be in flight.
	 */
	spin_lock_irqsave(sc->lock, flags);
	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
		printk(KERN_WARNING "%s: "
		    "URB is active after disconnect\n", sc->name);
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * The timer may fire with no URB outstanding; that is harmless,
	 * but it must not be running once we return.
	 */
	del_timer_sync(&sc->work_timer);

	/* Drop the reference taken at the top of this function. */
	ub_put(sc);
}
2443
/* USB driver glue: probe/disconnect plus no-op reset hooks. */
static struct usb_driver ub_driver = {
	.name =		"ub",
	.probe =	ub_probe,
	.disconnect =	ub_disconnect,
	.id_table =	ub_usb_ids,
	.pre_reset =	ub_pre_reset,
	.post_reset =	ub_post_reset,
};
2452
2453static int __init ub_init(void)
2454{
2455 int rc;
2456 int i;
2457
2458 for (i = 0; i < UB_QLOCK_NUM; i++)
2459 spin_lock_init(&ub_qlockv[i]);
2460
2461 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2462 goto err_regblkdev;
2463
2464 if ((rc = usb_register(&ub_driver)) != 0)
2465 goto err_register;
2466
2467 usb_usual_set_present(USB_US_TYPE_UB);
2468 return 0;
2469
2470err_register:
2471 unregister_blkdev(UB_MAJOR, DRV_NAME);
2472err_regblkdev:
2473 return rc;
2474}
2475
/*
 * Module exit: unwind ub_init() — deregister the USB driver first so no
 * new probes arrive, then release the block major and clear the
 * usb_usual presence flag.
 */
static void __exit ub_exit(void)
{
	usb_deregister(&ub_driver);

	unregister_blkdev(UB_MAJOR, DRV_NAME);
	usb_usual_clear_present(USB_US_TYPE_UB);
}
2483
2484module_init(ub_init);
2485module_exit(ub_exit);
2486
2487MODULE_LICENSE("GPL");
2488