linux/drivers/block/ub.c
/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
 *
 * This work is a part of the Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 *  -- Return sense now that rq allows it (we always auto-sense anyway).
 *  -- set readonly flag for CDs, set removable flag for CF readers
 *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 *  -- verify the 13 conditions and do bulk resets
 *  -- highmem
 *  -- move top_sense and work_bcs into separate allocations (if they survive)
 *     for cache purists and esoteric architectures.
 *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL?
 *  -- prune comments, they are too voluminous
 *  -- Resolve XXX's
 *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180

/*
 * The command state machine is the key model for understanding this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *       +--------+
 *       ! INIT   !
 *       +--------+
 *           !
 *        ub_scsi_cmd_start fails ->--------------------------------------\
 *           !                                                            !
 *           V                                                            !
 *       +--------+                                                       !
 *       ! CMD    !                                                       !
 *       +--------+                                                       !
 *           !                                            +--------+      !
 *         was -EPIPE -->-------------------------------->! CLEAR  !      !
 *           !                                            +--------+      !
 *           !                                                !           !
 *         was error -->------------------------------------- ! --------->\
 *           !                                                !           !
 *  /--<-- cmd->dir == NONE ?                                 !           !
 *  !        !                                                !           !
 *  !        V                                                !           !
 *  !    +--------+                                           !           !
 *  !    ! DATA   !                                           !           !
 *  !    +--------+                                           !           !
 *  !        !                           +---------+          !           !
 *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
 *  !        !                           +---------+          !           !
 *  !        !                                !               !           !
 *  !        !                              was error -->---- ! --------->\
 *  !      was error -->--------------------- ! ------------- ! --------->\
 *  !        !                                !               !           !
 *  !        V                                !               !           !
 *  \--->+--------+                           !               !           !
 *       ! STAT   !<--------------------------/               !           !
 *  /--->+--------+                                           !           !
 *  !        !                                                !           !
 * [C]     was -EPIPE -->-----------\                         !           !
 *  !        !                      !                         !           !
 *  +<---- len == 0                 !                         !           !
 *  !        !                      !                         !           !
 *  !      was error -->--------------------------------------!---------->\
 *  !        !                      !                         !           !
 *  +<---- bad CSW                  !                         !           !
 *  +<---- bad tag                  !                         !           !
 *  !        !                      V                         !           !
 *  !        !                 +--------+                     !           !
 *  !        !                 ! CLRRS  !                     !           !
 *  !        !                 +--------+                     !           !
 *  !        !                      !                         !           !
 *  \------- ! --------------------[C]--------\               !           !
 *           !                                !               !           !
 *         cmd->error---\                +--------+           !           !
 *           !          +--------------->! SENSE  !<----------/           !
 *         STAT_FAIL----/                +--------+                       !
 *           !                                !                           V
 *           !                                V                      +--------+
 *           \--------------------------------\--------------------->! DONE   !
 *                                                                   +--------+
 */

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

/*
 */

#define UB_PARTS_PER_LUN      8

#define UB_MAX_CDB_SIZE      16         /* Corresponds to Bulk */

#define UB_SENSE_SIZE  18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
        __le32  Signature;              /* contains 'USBC' */
        u32     Tag;                    /* unique per command id */
        __le32  DataTransferLength;     /* size of data */
        u8      Flags;                  /* direction in bit 7 (0x80 = IN) */
        u8      Lun;                    /* LUN */
        u8      Length;                 /* length of the CDB */
        u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
};

#define US_BULK_CB_WRAP_LEN     31
#define US_BULK_CB_SIGN         0x43425355      /* spells out 'USBC' */
#define US_BULK_FLAG_IN         1
#define US_BULK_FLAG_OUT        0
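
/*
 * An illustrative sketch (not a new code path): ub_scsi_cmd_start() below
 * fills this wrapper for a bulk-IN command roughly like so, with the 0x80
 * matching bit 7 of Flags:
 *
 *      bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 *      bcb->Tag = cmd->tag;
 *      bcb->DataTransferLength = cpu_to_le32(cmd->len);
 *      bcb->Flags = 0x80;                      (IN, device-to-host)
 *      bcb->Lun = cmd->lun->num;
 *      bcb->Length = cmd->cdb_len;
 */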

/* command status wrapper */
struct bulk_cs_wrap {
        __le32  Signature;              /* should = 'USBS' */
        u32     Tag;                    /* same as original command */
        __le32  Residue;                /* amount not transferred */
        u8      Status;                 /* see below */
};

#define US_BULK_CS_WRAP_LEN     13
#define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
#define US_BULK_STAT_OK         0
#define US_BULK_STAT_FAIL       1
#define US_BULK_STAT_PHASE      2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST   0xff
#define US_BULK_GET_MAX_LUN     0xfe

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT  (HZ*2)
#define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE     0
#define UB_DIR_READ     1
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE    3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
                         (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
        UB_CMDST_INIT,                  /* Initial state */
        UB_CMDST_CMD,                   /* Command submitted */
        UB_CMDST_DATA,                  /* Data phase */
        UB_CMDST_CLR2STS,               /* Clearing before requesting status */
        UB_CMDST_STAT,                  /* Status phase */
        UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
        UB_CMDST_CLRRS,                 /* Clearing before retrying status */
        UB_CMDST_SENSE,                 /* Sending Request Sense */
        UB_CMDST_DONE                   /* Final state */
};

struct ub_scsi_cmd {
        unsigned char cdb[UB_MAX_CDB_SIZE];
        unsigned char cdb_len;

        unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
        enum ub_scsi_cmd_state state;
        unsigned int tag;
        struct ub_scsi_cmd *next;

        int error;                      /* Return code - valid upon done */
        unsigned int act_len;           /* Return size */
        unsigned char key, asc, ascq;   /* May be valid if error==-EIO */

        int stat_count;                 /* Retries getting status. */
        unsigned int timeo;             /* jiffies until rq->timeout changes */

        unsigned int len;               /* Requested length */
        unsigned int current_sg;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];

        struct ub_lun *lun;
        void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
        void *back;
};

struct ub_request {
        struct request *rq;
        unsigned int current_try;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
        unsigned long nsec;             /* Linux size - 512 byte sectors */
        unsigned int bsize;             /* Linux hardsect_size */
        unsigned int bshift;            /* Shift between 512 and hard sects */
};
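
/*
 * Illustrative numbers (not from a real device): a CD-ROM with 2048-byte
 * sectors has bsize == 2048 and bshift == 2, so the 512-byte sector counts
 * that the block layer hands us are shifted right by 2 before they go into
 * a CDB (see ub_cmd_build_block below).
 */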

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes a struct instead of a pointer to
 * a struct is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
        unsigned int done;
        spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
        x->done = 0;
        spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)   ((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
        unsigned long flags;

        spin_lock_irqsave(&x->lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&x->lock, flags);
        ret = x->done;
        spin_unlock_irqrestore(&x->lock, flags);
        return ret;
}
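
/*
 * A sketch of the polling discipline, the way ub_scsi_dispatch() further
 * down uses it (illustrative, not a separate helper):
 *
 *      UB_INIT_COMPLETION(sc->work_done);      before the URB is submitted
 *      ...
 *      if (!ub_is_completed(&sc->work_done))
 *              break;                          URB still in flight, wait
 *      del_timer(&sc->work_timer);             URB done, reap it
 */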

/*
 */
struct ub_scsi_cmd_queue {
        int qlen, qmax;
        struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
        struct ub_dev *udev;
        struct list_head link;
        struct gendisk *disk;
        int id;                         /* Host index */
        int num;                        /* LUN number */
        char name[16];

        int changed;                    /* Media was changed */
        int removable;
        int readonly;

        struct ub_request urq;

        /* Use Ingo's mempool if or when we have more than one command. */
        /*
         * Currently we never need more than one command for the whole device.
         * However, giving every LUN a command is a cheap and automatic way
         * to enforce fairness between them.
         */
        int cmda[1];
        struct ub_scsi_cmd cmdv[1];

        struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
        spinlock_t *lock;
        atomic_t poison;                /* The USB device is disconnected */
        int openc;                      /* protected by ub_lock! */
                                        /* kref is too implicit for our taste */
        int reset;                      /* Reset is running */
        int bad_resid;
        unsigned int tagcnt;
        char name[12];
        struct usb_device *dev;
        struct usb_interface *intf;

        struct list_head luns;

        unsigned int send_bulk_pipe;    /* cached pipe values */
        unsigned int recv_bulk_pipe;
        unsigned int send_ctrl_pipe;
        unsigned int recv_ctrl_pipe;

        struct tasklet_struct tasklet;

        struct ub_scsi_cmd_queue cmd_queue;
        struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
        unsigned char top_sense[UB_SENSE_SIZE];

        struct ub_completion work_done;
        struct urb work_urb;
        struct timer_list work_timer;
        int last_pipe;                  /* What might need clearing */
        __le32 signature;               /* Learned signature */
        struct bulk_cb_wrap work_bcb;
        struct bulk_cs_wrap work_bcs;
        struct usb_ctrlrequest work_cr;

        struct work_struct reset_work;
        wait_queue_head_t reset_wait;
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  usb_storage_usb_ids
#else

static struct usb_device_id ub_usb_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
        { }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ub_lock, flags);
        for (i = 0; i < UB_MAX_HOSTS; i++) {
                if (ub_hostv[i] == 0) {
                        ub_hostv[i] = 1;
                        spin_unlock_irqrestore(&ub_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&ub_lock, flags);
        return -1;
}

static void ub_id_put(int id)
{
        unsigned long flags;

        if (id < 0 || id >= UB_MAX_HOSTS) {
                printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
                return;
        }

        spin_lock_irqsave(&ub_lock, flags);
        if (ub_hostv[id] == 0) {
                spin_unlock_irqrestore(&ub_lock, flags);
                printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
                return;
        }
        ub_hostv[id] = 0;
        spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have lifetime issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
        unsigned long flags;
        spinlock_t *ret;

        spin_lock_irqsave(&ub_lock, flags);
        ret = &ub_qlockv[ub_qlock_next];
        ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
        spin_unlock_irqrestore(&ub_lock, flags);
        return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
        unsigned long flags;

        spin_lock_irqsave(&ub_lock, flags);
        --sc->openc;
        if (sc->openc == 0 && atomic_read(&sc->poison)) {
                spin_unlock_irqrestore(&ub_lock, flags);
                ub_cleanup(sc);
        } else {
                spin_unlock_irqrestore(&ub_lock, flags);
        }
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
        struct list_head *p;
        struct ub_lun *lun;
        struct request_queue *q;

        while (!list_empty(&sc->luns)) {
                p = sc->luns.next;
                lun = list_entry(p, struct ub_lun, link);
                list_del(p);

                /* I don't think queue can be NULL. But... Stolen from sx8.c */
                if ((q = lun->disk->queue) != NULL)
                        blk_cleanup_queue(q);
                /*
                 * If we zero disk->private_data BEFORE put_disk, we have
                 * to check for NULL all over the place in open, release,
                 * check_media and revalidate, because the block level
                 * semaphore is well inside the put_disk.
                 * But we cannot zero after the call, because *disk is gone.
                 * The sd.c is blatantly racy in this area.
                 */
                /* disk->private_data = NULL; */
                put_disk(lun->disk);
                lun->disk = NULL;

                ub_id_put(lun->id);
                kfree(lun);
        }

        usb_set_intfdata(sc->intf, NULL);
        usb_put_intf(sc->intf);
        usb_put_dev(sc->dev);
        kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
        struct ub_scsi_cmd *ret;

        if (lun->cmda[0])
                return NULL;
        ret = &lun->cmdv[0];
        lun->cmda[0] = 1;
        return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
        if (cmd != &lun->cmdv[0]) {
                printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
                    lun->name, cmd);
                return;
        }
        if (!lun->cmda[0]) {
                printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
                return;
        }
        lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                t->tail->next = cmd;
                t->tail = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                cmd->next = t->head;
                t->head = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
        struct ub_scsi_cmd *cmd;

        if (t->qlen == 0)
                return NULL;
        if (--t->qlen == 0)
                t->tail = NULL;
        cmd = t->head;
        t->head = cmd->next;
        cmd->next = NULL;
        return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
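
/*
 * Note the two insertion disciplines above: ub_cmdq_add() appends at the
 * tail for normal FIFO submission, while ub_cmdq_insert() pushes at the
 * head, which ub_state_sense() uses to run a REQUEST SENSE ahead of the
 * command that triggered it.
 */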

/*
 * The request function is our main entry point.
 */

static void ub_request_fn(struct request_queue *q)
{
        struct ub_lun *lun = q->queuedata;
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (ub_request_fn_1(lun, rq) != 0) {
                        blk_stop_queue(q);
                        break;
                }
        }
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
        struct ub_dev *sc = lun->udev;
        struct ub_scsi_cmd *cmd;
        struct ub_request *urq;
        int n_elem;

        if (atomic_read(&sc->poison)) {
                blk_start_request(rq);
                ub_end_rq(rq, DID_NO_CONNECT << 16);
                return 0;
        }

        if (lun->changed && !blk_pc_request(rq)) {
                blk_start_request(rq);
                ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
                return 0;
        }

        if (lun->urq.rq != NULL)
                return -1;
        if ((cmd = ub_get_cmd(lun)) == NULL)
                return -1;
        memset(cmd, 0, sizeof(struct ub_scsi_cmd));

        blk_start_request(rq);

        urq = &lun->urq;
        memset(urq, 0, sizeof(struct ub_request));
        urq->rq = rq;

        /*
         * get scatterlist from block layer
         */
        sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
        n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
        if (n_elem < 0) {
                /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
                printk(KERN_INFO "%s: failed request map (%d)\n",
                    lun->name, n_elem);
                goto drop;
        }
        if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
                printk(KERN_WARNING "%s: request with %d segments\n",
                    lun->name, n_elem);
                goto drop;
        }
        urq->nsg = n_elem;

        if (blk_pc_request(rq)) {
                ub_cmd_build_packet(sc, lun, cmd, urq);
        } else {
                ub_cmd_build_block(sc, lun, cmd, urq);
        }
        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;
        if (ub_submit_scsi(sc, cmd) != 0)
                goto drop;

        return 0;

drop:
        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, DID_ERROR << 16);
        return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;
        unsigned int block, nblks;

        if (rq_data_dir(rq) == WRITE)
                cmd->dir = UB_DIR_WRITE;
        else
                cmd->dir = UB_DIR_READ;

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        /*
         * build the command
         *
         * The call to blk_queue_logical_block_size() guarantees that the
         * request is aligned, but it is given in terms of 512-byte units,
         * always.
         */
        block = blk_rq_pos(rq) >> lun->capacity.bshift;
        nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;

        cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
        /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
        cmd->cdb[2] = block >> 24;
        cmd->cdb[3] = block >> 16;
        cmd->cdb[4] = block >> 8;
        cmd->cdb[5] = block;
        cmd->cdb[7] = nblks >> 8;
        cmd->cdb[8] = nblks;
        cmd->cdb_len = 10;

        cmd->len = blk_rq_bytes(rq);
}
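
/*
 * A worked example with hypothetical numbers: a 4096-byte READ at 512-byte
 * sector 2048 on a plain 512-byte-sector LUN (bshift == 0) gives
 * block == 2048 (0x800) and nblks == 8, so the CDB becomes:
 *
 *      28 00 00 00 08 00 00 00 08 00
 *      (READ_10, LBA 0x00000800, transfer length 0x0008)
 */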

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;

        if (blk_rq_bytes(rq) == 0) {
                cmd->dir = UB_DIR_NONE;
        } else {
                if (rq_data_dir(rq) == WRITE)
                        cmd->dir = UB_DIR_WRITE;
                else
                        cmd->dir = UB_DIR_READ;
        }

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
        cmd->cdb_len = rq->cmd_len;

        cmd->len = blk_rq_bytes(rq);

        /*
         * To reapply this to every URB is not as incorrect as it looks.
         * In return, we avoid any complicated tracking calculations.
         */
        cmd->timeo = rq->timeout;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_lun *lun = cmd->lun;
        struct ub_request *urq = cmd->back;
        struct request *rq;
        unsigned int scsi_status;

        rq = urq->rq;

        if (cmd->error == 0) {
                if (blk_pc_request(rq)) {
                        if (cmd->act_len >= rq->resid_len)
                                rq->resid_len = 0;
                        else
                                rq->resid_len -= cmd->act_len;
                        scsi_status = 0;
                } else {
                        if (cmd->act_len != cmd->len) {
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        } else {
                                scsi_status = 0;
                        }
                }
        } else {
                if (blk_pc_request(rq)) {
                        /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
                        memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
                        rq->sense_len = UB_SENSE_SIZE;
                        if (sc->top_sense[0] != 0)
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        else
                                scsi_status = DID_ERROR << 16;
                } else {
                        if (cmd->error == -EIO &&
                            (cmd->key == 0 ||
                             cmd->key == MEDIUM_ERROR ||
                             cmd->key == UNIT_ATTENTION)) {
                                if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
                                        return;
                        }
                        scsi_status = SAM_STAT_CHECK_CONDITION;
                }
        }

        urq->rq = NULL;

        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, scsi_status);
        blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
        int error;

        if (scsi_status == 0) {
                error = 0;
        } else {
                error = -EIO;
                rq->errors = scsi_status;
        }
        __blk_end_request_all(rq, error);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

        if (atomic_read(&sc->poison))
                return -ENXIO;

        ub_reset_enter(sc, urq->current_try);

        if (urq->current_try >= 3)
                return -EIO;
        urq->current_try++;

        /* Remove this if anyone complains of flooding. */
        printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
            "[sense %x %02x %02x] retry %d\n",
            sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
            cmd->key, cmd->asc, cmd->ascq, urq->current_try);

        memset(cmd, 0, sizeof(struct ub_scsi_cmd));
        ub_cmd_build_block(sc, lun, cmd, urq);

        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
        return ub_submit_scsi(sc, cmd);
#else
        ub_cmdq_add(sc, cmd);
        return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (cmd->state != UB_CMDST_INIT ||
            (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
                return -EINVAL;
        }

        ub_cmdq_add(sc, cmd);
        /*
         * We can call ub_scsi_dispatch(sc) right away here, but it's a little
         * safer to jump to a tasklet, in case upper layers do something silly.
         */
        tasklet_schedule(&sc->tasklet);
        return 0;
}
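
/*
 * So, per the Iron Law, the calling pattern is simply this (it is what
 * ub_request_fn_1() above does):
 *
 *      cmd->tag = sc->tagcnt++;
 *      if (ub_submit_scsi(sc, cmd) != 0)
 *              goto drop;      nonzero: no callback coming, clean up here
 */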

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct bulk_cb_wrap *bcb;
        int rc;

        bcb = &sc->work_bcb;

        /*
         * ``If the allocation length is eighteen or greater, and a device
         * server returns less than eighteen bytes of data, the application
         * client should assume that the bytes not transferred would have been
         * zeroes had the device server returned those bytes.''
         *
         * We zero sense for all commands so that when a packet request
         * fails it does not return a stale sense.
         */
        memset(&sc->top_sense, 0, UB_SENSE_SIZE);

        /* set up the command wrapper */
        bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
        bcb->Tag = cmd->tag;            /* Endianness is not important */
        bcb->DataTransferLength = cpu_to_le32(cmd->len);
        bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
        bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
        bcb->Length = cmd->cdb_len;

        /* copy the command payload */
        memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->send_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
            bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_CMD;
        return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
        struct ub_dev *sc = (struct ub_dev *) arg;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        if (!ub_is_completed(&sc->work_done))
                usb_unlink_urb(&sc->work_urb);
        spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
        struct ub_dev *sc = urb->context;

        ub_complete(&sc->work_done);
        tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
        struct ub_dev *sc = (struct ub_dev *) _dev;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        ub_scsi_dispatch(sc);
        spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
        struct ub_scsi_cmd *cmd;
        int rc;

        while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
                if (cmd->state == UB_CMDST_DONE) {
                        ub_cmdq_pop(sc);
                        (*cmd->done)(sc, cmd);
                } else if (cmd->state == UB_CMDST_INIT) {
                        if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
                                break;
                        cmd->error = rc;
                        cmd->state = UB_CMDST_DONE;
                } else {
                        if (!ub_is_completed(&sc->work_done))
                                break;
                        del_timer(&sc->work_timer);
                        ub_scsi_urb_compl(sc, cmd);
                }
        }
}

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct urb *urb = &sc->work_urb;
        struct bulk_cs_wrap *bcs;
        int endp;
        int len;
        int rc;

        if (atomic_read(&sc->poison)) {
                ub_state_done(sc, cmd, -ENODEV);
                return;
        }

        endp = usb_pipeendpoint(sc->last_pipe);
        if (usb_pipein(sc->last_pipe))
                endp |= USB_DIR_IN;

        if (cmd->state == UB_CMDST_CLEAR) {
                if (urb->status == -EPIPE) {
                        /*
                         * STALL while clearing STALL.
                         * The control pipe clears itself - nothing to do.
                         */
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_sense(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLR2STS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLRRS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_stat_counted(sc, cmd);

        } else if (cmd->state == UB_CMDST_CMD) {
                switch (urb->status) {
                case 0:
                        break;
                case -EOVERFLOW:
                        goto Bad_End;
                case -EPIPE:
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                /*
                                 * This is typically ENOMEM or some other such shit.
                                 * Retrying is pointless. Just do Bad End on it...
                                 */
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLEAR;
                        return;
                case -ESHUTDOWN:        /* unplug */
                case -EILSEQ:           /* unplug timeout on uhci */
                        ub_state_done(sc, cmd, -ENODEV);
                        return;
                default:
                        goto Bad_End;
                }
                if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
                        goto Bad_End;
                }

                if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
                        ub_state_stat(sc, cmd);
                        return;
                }

                // udelay(125);         // usb-storage has this
                ub_data_start(sc, cmd);

        } else if (cmd->state == UB_CMDST_DATA) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLR2STS;
                        return;
                }
                if (urb->status == -EOVERFLOW) {
                        /*
                         * A babble? Failure, but we must transfer CSW now.
                         */
                        cmd->error = -EOVERFLOW;        /* A cheap trick... */
                        ub_state_stat(sc, cmd);
                        return;
                }

                if (cmd->dir == UB_DIR_WRITE) {
                        /*
                         * Do not continue writes in case of a failure.
                         * Doing so would cause sectors to be mixed up,
                         * which is worse than sectors lost.
                         *
                         * We must try to read the CSW, or many devices
                         * get confused.
                         */
                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                cmd->act_len += len;

                                cmd->error = -EIO;
                                ub_state_stat(sc, cmd);
                                return;
                        }

                } else {
                        /*
                         * If an error occurs on read, we record it, and
                         * continue to fetch data in order to avoid a bubble.
                         *
                         * As a small shortcut, we stop if we detect that
                         * a CSW got mixed into the data.
                         */
                        if (urb->status != 0)
                                cmd->error = -EIO;

                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
                                        goto Bad_End;
                        }
                }

                cmd->act_len += urb->actual_length;

                if (++cmd->current_sg < cmd->nsg) {
                        ub_data_start(sc, cmd);
                        return;
                }
                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_STAT) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }

                        /*
                         * Having a stall when getting CSW is an error, so
                         * make sure upper levels are not oblivious to it.
                         */
                        cmd->error = -EIO;              /* A cheap trick... */

                        cmd->state = UB_CMDST_CLRRS;
                        return;
                }

                /* Catch everything, including -EOVERFLOW and other nasties. */
                if (urb->status != 0)
                        goto Bad_End;

                if (urb->actual_length == 0) {
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                /*
                 * Check the returned Bulk protocol status.
                 * The status block has to be validated first.
                 */

                bcs = &sc->work_bcs;

                if (sc->signature == cpu_to_le32(0)) {
                        /*
                         * This is the first reply, so do not perform the check.
                         * Instead, remember the signature the device uses
                         * for future checks. But do not allow a nul.
                         */
                        sc->signature = bcs->Signature;
                        if (sc->signature == cpu_to_le32(0)) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                } else {
                        if (bcs->Signature != sc->signature) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                }

                if (bcs->Tag != cmd->tag) {
                        /*
                         * This usually happens when we disagree with the
                         * device's microcode about something. For instance,
                         * a few of them throw this after timeouts. They buffer
                         * commands and reply to commands we timed out before.
                         * Without flushing these replies we loop forever.
                         */
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                if (!sc->bad_resid) {
                        len = le32_to_cpu(bcs->Residue);
                        if (len != cmd->len - cmd->act_len) {
                                /*
                                 * Only start ignoring if this cmd ended well.
                                 */
                                if (cmd->len == cmd->act_len) {
                                        printk(KERN_NOTICE "%s: "
                                            "bad residual %d of %d, ignoring\n",
                                            sc->name, len, cmd->len);
                                        sc->bad_resid = 1;
                                }
                        }
                }

                switch (bcs->Status) {
                case US_BULK_STAT_OK:
                        break;
                case US_BULK_STAT_FAIL:
                        ub_state_sense(sc, cmd);
                        return;
                case US_BULK_STAT_PHASE:
                        goto Bad_End;
                default:
                        printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
                            sc->name, bcs->Status);
                        ub_state_done(sc, cmd, -EINVAL);
                        return;
                }

                /* Not zeroing error to preserve a babble indicator */
                if (cmd->error != 0) {
                        ub_state_sense(sc, cmd);
                        return;
                }
                cmd->state = UB_CMDST_DONE;
                ub_cmdq_pop(sc);
                (*cmd->done)(sc, cmd);

        } else if (cmd->state == UB_CMDST_SENSE) {
                ub_state_done(sc, cmd, -EIO);

        } else {
                printk(KERN_WARNING "%s: wrong command state %d\n",
                    sc->name, cmd->state);
                ub_state_done(sc, cmd, -EINVAL);
                return;
        }
        return;

Bad_End: /* Little Excel is dead */
        ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
        int pipe;
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        if (cmd->dir == UB_DIR_READ)
                pipe = sc->recv_bulk_pipe;
        else
                pipe = sc->send_bulk_pipe;
        sc->last_pipe = pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
            sg->length, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_DATA;
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

        cmd->error = rc;
        cmd->state = UB_CMDST_DONE;
        ub_cmdq_pop(sc);
        (*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->recv_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
            &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return -1;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->stat_count = 0;
        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (++cmd->stat_count >= 4) {
                ub_state_sense(sc, cmd);
                return;
        }

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd *scmd;
        struct scatterlist *sg;
        int rc;

        if (cmd->cdb[0] == REQUEST_SENSE) {
                rc = -EPIPE;
                goto error;
        }

        scmd = &sc->top_rqs_cmd;
        memset(scmd, 0, sizeof(struct ub_scsi_cmd));
        scmd->cdb[0] = REQUEST_SENSE;
        scmd->cdb[4] = UB_SENSE_SIZE;
        scmd->cdb_len = 6;
        scmd->dir = UB_DIR_READ;
        scmd->state = UB_CMDST_INIT;
        scmd->nsg = 1;
        sg = &scmd->sgv[0];
        sg_init_table(sg, UB_MAX_REQ_SG);
        sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
                        (unsigned long)sc->top_sense & (PAGE_SIZE-1));
        scmd->len = UB_SENSE_SIZE;
        scmd->lun = cmd->lun;
        scmd->done = ub_top_sense_done;
        scmd->back = cmd;

        scmd->tag = sc->tagcnt++;

        cmd->state = UB_CMDST_SENSE;

        ub_cmdq_insert(sc, scmd);
        return;

error:
        ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
        int endp;
        struct usb_ctrlrequest *cr;
        int rc;

        endp = usb_pipeendpoint(stalled_pipe);
        if (usb_pipein (stalled_pipe))
                endp |= USB_DIR_IN;

        cr = &sc->work_cr;
        cr->bRequestType = USB_RECIP_ENDPOINT;
        cr->bRequest = USB_REQ_CLEAR_FEATURE;
        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
        cr->wIndex = cpu_to_le16(endp);
        cr->wLength = cpu_to_le16(0);

        UB_INIT_COMPLETION(sc->work_done);

        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
            (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}
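
/*
 * For reference, the above builds the standard CLEAR_FEATURE(ENDPOINT_HALT)
 * setup packet: bmRequestType 0x02 (endpoint recipient), bRequest 0x01
 * (CLEAR_FEATURE), wValue 0 (ENDPOINT_HALT), wIndex = endpoint address,
 * wLength 0. No data stage, hence the NULL buffer.
 */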
1498
1499/*
1500 */
1501static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1502{
1503        unsigned char *sense = sc->top_sense;
1504        struct ub_scsi_cmd *cmd;
1505
1506        /*
1507         * Find the command which triggered the unit attention or a check,
1508         * save the sense into it, and advance its state machine.
1509         */
1510        if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1511                printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1512                return;
1513        }
1514        if (cmd != scmd->back) {
1515                printk(KERN_WARNING "%s: "
1516                    "sense done for wrong command 0x%x\n",
1517                    sc->name, cmd->tag);
1518                return;
1519        }
1520        if (cmd->state != UB_CMDST_SENSE) {
1521                printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
1522                    sc->name, cmd->state);
1523                return;
1524        }
1525
1526        /*
1527         * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1528         */
1529        cmd->key = sense[2] & 0x0F;
1530        cmd->asc = sense[12];
1531        cmd->ascq = sense[13];
1532
1533        ub_scsi_urb_compl(sc, cmd);
1534}
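
/*
 * For reference, the layout of the fixed-format sense data decoded
 * above (per SPC):
 *
 *	sense[0]	response code (0x70 current, 0x71 deferred)
 *	sense[2]	sense key in bits 3:0
 *	sense[7]	additional sense length
 *	sense[12]	additional sense code (ASC)
 *	sense[13]	additional sense code qualifier (ASCQ)
 *
 * Since the buffer is pre-zeroed, a short answer reads back as zero
 * key/ASC/ASCQ rather than as garbage.
 */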
1535
1536/*
1537 * Reset management
1538 */
1539
1540static void ub_reset_enter(struct ub_dev *sc, int try)
1541{
1542
1543        if (sc->reset) {
1544                /* This happens often on multi-LUN devices. */
1545                return;
1546        }
1547        sc->reset = try + 1;
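        /*
         * Note the encoding: zero means no reset is in progress, and
         * the low bit of the stored try+1 selects the method in
         * ub_reset_task (even means a Bulk-Only reset, odd means a
         * full USB device reset).
         */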
1548
1549#if 0 /* Not needed because the disconnect waits for us. */
1550        unsigned long flags;
1551        spin_lock_irqsave(&ub_lock, flags);
1552        sc->openc++;
1553        spin_unlock_irqrestore(&ub_lock, flags);
1554#endif
1555
1556#if 0 /* We let them stop themselves. */
1557        struct ub_lun *lun;
1558        list_for_each_entry(lun, &sc->luns, link) {
1559                blk_stop_queue(lun->disk->queue);
1560        }
1561#endif
1562
1563        schedule_work(&sc->reset_work);
1564}
1565
1566static void ub_reset_task(struct work_struct *work)
1567{
1568        struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1569        unsigned long flags;
1570        struct ub_lun *lun;
1571        int rc;
1572
1573        if (!sc->reset) {
1574                printk(KERN_WARNING "%s: Running reset unrequested\n",
1575                    sc->name);
1576                return;
1577        }
1578
1579        if (atomic_read(&sc->poison)) {
1580                ;
1581        } else if ((sc->reset & 1) == 0) {
1582                ub_sync_reset(sc);
1583                msleep(700);    /* usb-storage sleeps 6s (!) */
1584                ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1585                ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1586        } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1587                ;
1588        } else {
1589                rc = usb_lock_device_for_reset(sc->dev, sc->intf);
1590                if (rc < 0) {
1591                        printk(KERN_NOTICE
1592                            "%s: usb_lock_device_for_reset failed (%d)\n",
1593                            sc->name, rc);
1594                } else {
1595                        rc = usb_reset_device(sc->dev);
1596                        if (rc < 0) {
1597                                printk(KERN_NOTICE "%s: "
1598                                    "usb_reset_device failed (%d)\n",
1599                                    sc->name, rc);
1600                        }
1601                        usb_unlock_device(sc->dev);
1602                }
1603        }
1604
1605        /*
1606         * In theory, no commands can be running while reset is active,
1607         * so nobody can ask for another reset, and so we do not need any
1608         * queues of resets or anything. We do need a spinlock though,
1609         * to interact with block layer.
1610         */
1611        spin_lock_irqsave(sc->lock, flags);
1612        sc->reset = 0;
1613        tasklet_schedule(&sc->tasklet);
1614        list_for_each_entry(lun, &sc->luns, link) {
1615                blk_start_queue(lun->disk->queue);
1616        }
1617        wake_up(&sc->reset_wait);
1618        spin_unlock_irqrestore(sc->lock, flags);
1619}
1620
1621/*
1622 * XXX Reset brackets are too much hassle to implement, so just stub them
1623 * in order to prevent forced unbinding (which deadlocks solid when our
1624 * ->disconnect method waits for the reset to complete and this kills keventd).
1625 *
1626 * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
1627 * or else the post_reset is invoked, and restarts I/O on a locked device.
1628 */
1629static int ub_pre_reset(struct usb_interface *iface)
{
1630        return 0;
1631}
1632
1633static int ub_post_reset(struct usb_interface *iface)
{
1634        return 0;
1635}
1636
1637/*
1638 * This is called from a process context.
1639 */
1640static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1641{
1642
1643        lun->readonly = 0;      /* XXX Query this from the device */
1644
1645        lun->capacity.nsec = 0;
1646        lun->capacity.bsize = 512;
1647        lun->capacity.bshift = 0;
1648
1649        if (ub_sync_tur(sc, lun) != 0)
1650                return;                 /* Not ready */
1651        lun->changed = 0;
1652
1653        if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1654                /*
1655                 * The retry here means something is wrong, either with the
1656                 * device, with the transport, or with our code.
1657                 * We keep this because sd.c has retries for capacity.
1658                 */
1659                if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1660                        lun->capacity.nsec = 0;
1661                        lun->capacity.bsize = 512;
1662                        lun->capacity.bshift = 0;
1663                }
1664        }
1665}
1666
1667/*
1668 * The open function.
1669 * This is mostly needed to keep refcounting, but also to support
1670 * media checks on removable media drives.
1671 */
1672static int ub_bd_open(struct block_device *bdev, fmode_t mode)
1673{
1674        struct ub_lun *lun = bdev->bd_disk->private_data;
1675        struct ub_dev *sc = lun->udev;
1676        unsigned long flags;
1677        int rc;
1678
1679        spin_lock_irqsave(&ub_lock, flags);
1680        if (atomic_read(&sc->poison)) {
1681                spin_unlock_irqrestore(&ub_lock, flags);
1682                return -ENXIO;
1683        }
1684        sc->openc++;
1685        spin_unlock_irqrestore(&ub_lock, flags);
1686
1687        if (lun->removable || lun->readonly)
1688                check_disk_change(bdev);
1689
1690        /*
1691         * sd.c considers ->media_present and ->changed to be distinct,
1692         * under some pretty murky conditions (a failure of READ CAPACITY).
1693         * We may need that distinction one day.
1694         */
1695        if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
1696                rc = -ENOMEDIUM;
1697                goto err_open;
1698        }
1699
1700        if (lun->readonly && (mode & FMODE_WRITE)) {
1701                rc = -EROFS;
1702                goto err_open;
1703        }
1704
1705        return 0;
1706
1707err_open:
1708        ub_put(sc);
1709        return rc;
1710}
1711
1712/*
1713 * The release function: drop the reference taken in ub_bd_open.
 */
1714static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1715{
1716        struct ub_lun *lun = disk->private_data;
1717        struct ub_dev *sc = lun->udev;
1718
1719        ub_put(sc);
1720        return 0;
1721}
1722
1723/*
1724 * The ioctl interface.
1725 */
1726static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1727    unsigned int cmd, unsigned long arg)
1728{
1729        struct gendisk *disk = bdev->bd_disk;
1730        void __user *usermem = (void __user *) arg;
1731
1732        return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1733}
1734
1735/*
1736 * This is called by check_disk_change if we reported a media change.
1737 * The main objective here is to discover the features of the media, such as
1738 * the capacity, read-only status, etc. USB storage generally does not
1739 * need to be spun up, but if we needed it, this would be the place.
1740 *
1741 * This call can sleep.
1742 *
1743 * The return code is not used.
1744 */
1745static int ub_bd_revalidate(struct gendisk *disk)
1746{
1747        struct ub_lun *lun = disk->private_data;
1748
1749        ub_revalidate(lun->udev, lun);
1750
1751        /* XXX Support sector size switching like in sr.c */
1752        blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
1753        set_capacity(disk, lun->capacity.nsec);
1754        // set_disk_ro(sdkp->disk, lun->readonly);
1755
1756        return 0;
1757}
1758
1759/*
1760 * The check is called by the block layer to verify if the media
1761 * is still available. It is supposed to be harmless, lightweight and
1762 * non-intrusive in case the media was not changed.
1763 *
1764 * This call can sleep.
1765 *
1766 * The return code is bool!
1767 */
1768static int ub_bd_media_changed(struct gendisk *disk)
1769{
1770        struct ub_lun *lun = disk->private_data;
1771
1772        if (!lun->removable)
1773                return 0;
1774
1775        /*
1776         * We always clear checks after every command, so this is not
1777         * as dangerous as it looks. If TEST_UNIT_READY fails here,
1778         * the device really is not ready, and operator or software
1779         * intervention is required. One dangerous case is a drive which
1780         * spins itself down: come the time to write dirty pages, the
1781         * writes fail and the block layer discards the data. Since we never
1782         * spin drives up, such devices simply cannot be used with ub anyway.
1783         */
1784        if (ub_sync_tur(lun->udev, lun) != 0) {
1785                lun->changed = 1;
1786                return 1;
1787        }
1788
1789        return lun->changed;
1790}
1791
1792static const struct block_device_operations ub_bd_fops = {
1793        .owner          = THIS_MODULE,
1794        .open           = ub_bd_open,
1795        .release        = ub_bd_release,
1796        .locked_ioctl   = ub_bd_ioctl,
1797        .media_changed  = ub_bd_media_changed,
1798        .revalidate_disk = ub_bd_revalidate,
1799};
1800
1801/*
1802 * Common ->done routine for commands executed synchronously.
1803 */
1804static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1805{
1806        struct completion *cop = cmd->back;
1807        complete(cop);
1808}
1809
1810/*
1811 * Test if the device has a check condition on it, synchronously.
1812 */
1813static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1814{
1815        struct ub_scsi_cmd *cmd;
1816        enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1817        unsigned long flags;
1818        struct completion compl;
1819        int rc;
1820
1821        init_completion(&compl);
1822
1823        rc = -ENOMEM;
1824        if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1825                goto err_alloc;
1826
1827        cmd->cdb[0] = TEST_UNIT_READY;
1828        cmd->cdb_len = 6;
1829        cmd->dir = UB_DIR_NONE;
1830        cmd->state = UB_CMDST_INIT;
1831        cmd->lun = lun;                 /* This may be NULL, but that's ok */
1832        cmd->done = ub_probe_done;
1833        cmd->back = &compl;
1834
1835        spin_lock_irqsave(sc->lock, flags);
1836        cmd->tag = sc->tagcnt++;
1837
1838        rc = ub_submit_scsi(sc, cmd);
1839        spin_unlock_irqrestore(sc->lock, flags);
1840
1841        if (rc != 0)
1842                goto err_submit;
1843
1844        wait_for_completion(&compl);
1845
1846        rc = cmd->error;
1847
1848        if (rc == -EIO && cmd->key != 0)        /* Retries for benh's key */
1849                rc = cmd->key;
1850
1851err_submit:
1852        kfree(cmd);
1853err_alloc:
1854        return rc;
1855}
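
/*
 * Note the return convention: zero means the unit is ready, a negative
 * errno reports a transport-level failure, and a small positive value
 * is the SCSI sense key. ub_probe relies on the latter to retry on
 * UNIT ATTENTION (key 0x6).
 */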
1856
1857/*
1858 * Read the SCSI capacity synchronously (for probing).
1859 */
1860static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1861    struct ub_capacity *ret)
1862{
1863        struct ub_scsi_cmd *cmd;
1864        struct scatterlist *sg;
1865        char *p;
1866        enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1867        unsigned long flags;
1868        unsigned int bsize, shift;
1869        unsigned long nsec;
1870        struct completion compl;
1871        int rc;
1872
1873        init_completion(&compl);
1874
1875        rc = -ENOMEM;
1876        if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1877                goto err_alloc;
1878        p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1879
1880        cmd->cdb[0] = 0x25;             /* READ CAPACITY(10) */
1881        cmd->cdb_len = 10;
1882        cmd->dir = UB_DIR_READ;
1883        cmd->state = UB_CMDST_INIT;
1884        cmd->nsg = 1;
1885        sg = &cmd->sgv[0];
1886        sg_init_table(sg, UB_MAX_REQ_SG);
1887        sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
1888        cmd->len = 8;
1889        cmd->lun = lun;
1890        cmd->done = ub_probe_done;
1891        cmd->back = &compl;
1892
1893        spin_lock_irqsave(sc->lock, flags);
1894        cmd->tag = sc->tagcnt++;
1895
1896        rc = ub_submit_scsi(sc, cmd);
1897        spin_unlock_irqrestore(sc->lock, flags);
1898
1899        if (rc != 0)
1900                goto err_submit;
1901
1902        wait_for_completion(&compl);
1903
1904        if (cmd->error != 0) {
1905                rc = -EIO;
1906                goto err_read;
1907        }
1908        if (cmd->act_len != 8) {
1909                rc = -EIO;
1910                goto err_read;
1911        }
1912
1913        /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1914        nsec = be32_to_cpu(*(__be32 *)p) + 1;
1915        bsize = be32_to_cpu(*(__be32 *)(p + 4));
1916        switch (bsize) {
1917        case 512:       shift = 0;      break;
1918        case 1024:      shift = 1;      break;
1919        case 2048:      shift = 2;      break;
1920        case 4096:      shift = 3;      break;
1921        default:
1922                rc = -EDOM;
1923                goto err_inv_bsize;
1924        }
1925
1926        ret->bsize = bsize;
1927        ret->bshift = shift;
1928        ret->nsec = nsec << shift;
1929        rc = 0;
1930
1931err_inv_bsize:
1932err_read:
1933err_submit:
1934        kfree(cmd);
1935err_alloc:
1936        return rc;
1937}
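
/*
 * For reference, the 8-byte READ CAPACITY(10) response parsed above:
 *
 *	bytes 0..3	last logical block address (big-endian)
 *	bytes 4..7	block length in bytes (big-endian)
 *
 * nsec is (last LBA + 1) scaled by bshift, because ub keeps capacities
 * in 512-byte sectors regardless of the device's block size.
 */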
1938
1939/*
1940 * Completion and timeout callbacks for synchronous probe-time URBs.
 */
1941static void ub_probe_urb_complete(struct urb *urb)
1942{
1943        struct completion *cop = urb->context;
1944        complete(cop);
1945}
1946
1947static void ub_probe_timeout(unsigned long arg)
1948{
1949        struct completion *cop = (struct completion *) arg;
1950        complete(cop);
1951}
1952
1953/*
1954 * Reset with a Bulk reset.
1955 */
1956static int ub_sync_reset(struct ub_dev *sc)
1957{
1958        int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1959        struct usb_ctrlrequest *cr;
1960        struct completion compl;
1961        struct timer_list timer;
1962        int rc;
1963
1964        init_completion(&compl);
1965
1966        cr = &sc->work_cr;
1967        cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1968        cr->bRequest = US_BULK_RESET_REQUEST;
1969        cr->wValue = cpu_to_le16(0);
1970        cr->wIndex = cpu_to_le16(ifnum);
1971        cr->wLength = cpu_to_le16(0);
1972
1973        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1974            (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1975
1976        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1977                printk(KERN_WARNING
1978                     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1979                return rc;
1980        }
1981
1982        init_timer(&timer);
1983        timer.function = ub_probe_timeout;
1984        timer.data = (unsigned long) &compl;
1985        timer.expires = jiffies + UB_CTRL_TIMEOUT;
1986        add_timer(&timer);
1987
1988        wait_for_completion(&compl);
1989
1990        del_timer_sync(&timer);
1991        usb_kill_urb(&sc->work_urb);
1992
1993        return sc->work_urb.status;
1994}
1995
1996/*
1997 * Get the number of LUNs by way of the Bulk GetMaxLUN command.
1998 */
1999static int ub_sync_getmaxlun(struct ub_dev *sc)
2000{
2001        int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2002        unsigned char *p;
2003        enum { ALLOC_SIZE = 1 };
2004        struct usb_ctrlrequest *cr;
2005        struct completion compl;
2006        struct timer_list timer;
2007        int nluns;
2008        int rc;
2009
2010        init_completion(&compl);
2011
2012        rc = -ENOMEM;
2013        if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2014                goto err_alloc;
2015        *p = 55;        /* Arbitrary sentinel: a returned 55 is treated as no answer */
2016
2017        cr = &sc->work_cr;
2018        cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2019        cr->bRequest = US_BULK_GET_MAX_LUN;
2020        cr->wValue = cpu_to_le16(0);
2021        cr->wIndex = cpu_to_le16(ifnum);
2022        cr->wLength = cpu_to_le16(1);
2023
2024        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2025            (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2026
2027        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2028                goto err_submit;
2029
2030        init_timer(&timer);
2031        timer.function = ub_probe_timeout;
2032        timer.data = (unsigned long) &compl;
2033        timer.expires = jiffies + UB_CTRL_TIMEOUT;
2034        add_timer(&timer);
2035
2036        wait_for_completion(&compl);
2037
2038        del_timer_sync(&timer);
2039        usb_kill_urb(&sc->work_urb);
2040
2041        if ((rc = sc->work_urb.status) < 0)
2042                goto err_io;
2043
2044        if (sc->work_urb.actual_length != 1) {
2045                nluns = 0;
2046        } else {
2047                if ((nluns = *p) == 55) {
2048                        nluns = 0;
2049                } else {
2050                        /* GetMaxLUN returns the maximum LUN number */
2051                        nluns += 1;
2052                        if (nluns > UB_MAX_LUNS)
2053                                nluns = UB_MAX_LUNS;
2054                }
2055        }
2056
2057        kfree(p);
2058        return nluns;
2059
2060err_io:
2061err_submit:
2062        kfree(p);
2063err_alloc:
2064        return rc;
2065}
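
/*
 * Note that quite a few single-LUN devices STALL Get Max LUN instead
 * of answering it. The URB then completes with a negative status, the
 * negative rc propagates to ub_probe, and the caller's default of one
 * LUN stands, which matches what usb-storage does in the same case.
 */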
2066
2067/*
2068 * Clear initial stalls.
2069 */
2070static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2071{
2072        int endp;
2073        struct usb_ctrlrequest *cr;
2074        struct completion compl;
2075        struct timer_list timer;
2076        int rc;
2077
2078        init_completion(&compl);
2079
2080        endp = usb_pipeendpoint(stalled_pipe);
2081        if (usb_pipein(stalled_pipe))
2082                endp |= USB_DIR_IN;
2083
2084        cr = &sc->work_cr;
2085        cr->bRequestType = USB_RECIP_ENDPOINT;
2086        cr->bRequest = USB_REQ_CLEAR_FEATURE;
2087        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2088        cr->wIndex = cpu_to_le16(endp);
2089        cr->wLength = cpu_to_le16(0);
2090
2091        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2092            (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2093
2094        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2095                printk(KERN_WARNING
2096                     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2097                return rc;
2098        }
2099
2100        init_timer(&timer);
2101        timer.function = ub_probe_timeout;
2102        timer.data = (unsigned long) &compl;
2103        timer.expires = jiffies + UB_CTRL_TIMEOUT;
2104        add_timer(&timer);
2105
2106        wait_for_completion(&compl);
2107
2108        del_timer_sync(&timer);
2109        usb_kill_urb(&sc->work_urb);
2110
2111        usb_reset_endpoint(sc->dev, endp);
2112
2113        return 0;
2114}
2115
2116/*
2117 * Get the pipe settings.
2118 */
2119static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2120    struct usb_interface *intf)
2121{
2122        struct usb_host_interface *altsetting = intf->cur_altsetting;
2123        struct usb_endpoint_descriptor *ep_in = NULL;
2124        struct usb_endpoint_descriptor *ep_out = NULL;
2125        struct usb_endpoint_descriptor *ep;
2126        int i;
2127
2128        /*
2129         * Find the endpoints we need.
2130         * We are expecting a minimum of 2 endpoints - in and out (bulk).
2131         * We will ignore any others.
2132         */
2133        for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2134                ep = &altsetting->endpoint[i].desc;
2135
2136                /* Is it a BULK endpoint? */
2137                if (usb_endpoint_xfer_bulk(ep)) {
2138                        /* BULK in or out? */
2139                        if (usb_endpoint_dir_in(ep)) {
2140                                if (ep_in == NULL)
2141                                        ep_in = ep;
2142                        } else {
2143                                if (ep_out == NULL)
2144                                        ep_out = ep;
2145                        }
2146                }
2147        }
2148
2149        if (ep_in == NULL || ep_out == NULL) {
2150                printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
2151                return -ENODEV;
2152        }
2153
2154        /* Calculate and store the pipe values */
2155        sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2156        sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2157        sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2158                usb_endpoint_num(ep_out));
2159        sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
2160                usb_endpoint_num(ep_in));
2161
2162        return 0;
2163}
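
/*
 * On later kernels, the endpoint scan above could be collapsed into
 * the usb_find_common_endpoints() helper; a minimal sketch, assuming
 * that helper is available:
 *
 *	if (usb_find_common_endpoints(intf->cur_altsetting,
 *	    &ep_in, &ep_out, NULL, NULL) < 0)
 *		return -ENODEV;
 *
 * It performs the same first-match search over the bulk endpoints.
 */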
2164
2165/*
2166 * Probing is done in process context, which allows us to cheat
2167 * and not build a state machine for the discovery.
2168 */
2169static int ub_probe(struct usb_interface *intf,
2170    const struct usb_device_id *dev_id)
2171{
2172        struct ub_dev *sc;
2173        int nluns;
2174        int rc;
2175        int i;
2176
2177        if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2178                return -ENXIO;
2179
2180        rc = -ENOMEM;
2181        if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2182                goto err_core;
2183        sc->lock = ub_next_lock();
2184        INIT_LIST_HEAD(&sc->luns);
2185        usb_init_urb(&sc->work_urb);
2186        tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2187        atomic_set(&sc->poison, 0);
2188        INIT_WORK(&sc->reset_work, ub_reset_task);
2189        init_waitqueue_head(&sc->reset_wait);
2190
2191        init_timer(&sc->work_timer);
2192        sc->work_timer.data = (unsigned long) sc;
2193        sc->work_timer.function = ub_urb_timeout;
2194
2195        ub_init_completion(&sc->work_done);
2196        sc->work_done.done = 1;         /* A little yuk, but oh well... */
2197
2198        sc->dev = interface_to_usbdev(intf);
2199        sc->intf = intf;
2200        // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2201        usb_set_intfdata(intf, sc);
2202        usb_get_dev(sc->dev);
2203        /*
2204         * Since we give the interface struct to the block level through
2205         * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2206         * oopses on close after a disconnect (kernels 2.6.16 and up).
2207         */
2208        usb_get_intf(sc->intf);
2209
2210        snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2211            sc->dev->bus->busnum, sc->dev->devnum);
2212
2213        /* XXX Verify that we can handle the device (from descriptors) */
2214
2215        if (ub_get_pipes(sc, sc->dev, intf) != 0)
2216                goto err_dev_desc;
2217
2218        /*
2219         * At this point, all USB initialization is done, do upper layer.
2220         * We really hate halfway initialized structures, so from the
2221         * invariants perspective, this ub_dev is fully constructed at
2222         * this point.
2223         */
2224
2225        /*
2226         * This is needed to clear toggles. It is a problem only if we do
2227         * `rmmod ub && modprobe ub` without disconnects, but we like that.
2228         */
2229#if 0 /* iPod Mini fails if we do this (big white iPod works) */
2230        ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2231        ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2232#endif
2233
2234        /*
2235         * The way this is used by the startup code is a little specific.
2236         * A SCSI check causes a USB stall. Our common case code sees it
2237         * and clears the check, after which the device is ready for use.
2238         * But while a check is pending, any command other than
2239         * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2240         *
2241         * If we neglect to clear the SCSI check, the first real command
2242         * (the capacity readout) fails. We would clear that and retry, but
2243         * why cause spurious retries for no reason?
2244         *
2245         * Revalidation may start with its own TEST_UNIT_READY, but that one
2246         * has to succeed, so we clear checks with an additional one here.
2247         * In any case it's not our business how revalidation is implemented.
2248         */
2249        for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
2250                if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2251                if (rc != 0x6) break;   /* 0x6 is the UNIT ATTENTION sense key */
2252                msleep(10);
2253        }
2254
2255        nluns = 1;
2256        for (i = 0; i < 3; i++) {
2257                if ((rc = ub_sync_getmaxlun(sc)) < 0)
2258                        break;
2259                if (rc != 0) {
2260                        nluns = rc;
2261                        break;
2262                }
2263                msleep(100);
2264        }
2265
2266        for (i = 0; i < nluns; i++) {
2267                ub_probe_lun(sc, i);
2268        }
2269        return 0;
2270
2271err_dev_desc:
2272        usb_set_intfdata(intf, NULL);
2273        usb_put_intf(sc->intf);
2274        usb_put_dev(sc->dev);
2275        kfree(sc);
2276err_core:
2277        return rc;
2278}
2279
2280static int ub_probe_lun(struct ub_dev *sc, int lnum)
2281{
2282        struct ub_lun *lun;
2283        struct request_queue *q;
2284        struct gendisk *disk;
2285        int rc;
2286
2287        rc = -ENOMEM;
2288        if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2289                goto err_alloc;
2290        lun->num = lnum;
2291
2292        rc = -ENOSR;
2293        if ((lun->id = ub_id_get()) == -1)
2294                goto err_id;
2295
2296        lun->udev = sc;
2297
2298        snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2299            lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2300
2301        lun->removable = 1;             /* XXX Query this from the device */
2302        lun->changed = 1;               /* ub_revalidate clears only */
2303        ub_revalidate(sc, lun);
2304
2305        rc = -ENOMEM;
2306        if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2307                goto err_diskalloc;
2308
2309        sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2310        disk->major = UB_MAJOR;
2311        disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2312        disk->fops = &ub_bd_fops;
2313        disk->private_data = lun;
2314        disk->driverfs_dev = &sc->intf->dev;
2315
2316        rc = -ENOMEM;
2317        if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2318                goto err_blkqinit;
2319
2320        disk->queue = q;
2321
2322        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2323        blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2324        blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2325        blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
2326        blk_queue_max_sectors(q, UB_MAX_SECTORS);
2327        blk_queue_logical_block_size(q, lun->capacity.bsize);
2328
2329        lun->disk = disk;
2330        q->queuedata = lun;
2331        list_add(&lun->link, &sc->luns);
2332
2333        set_capacity(disk, lun->capacity.nsec);
2334        if (lun->removable)
2335                disk->flags |= GENHD_FL_REMOVABLE;
2336
2337        add_disk(disk);
2338
2339        return 0;
2340
2341err_blkqinit:
2342        put_disk(disk);
2343err_diskalloc:
2344        ub_id_put(lun->id);
2345err_id:
2346        kfree(lun);
2347err_alloc:
2348        return rc;
2349}
2350
2351static void ub_disconnect(struct usb_interface *intf)
2352{
2353        struct ub_dev *sc = usb_get_intfdata(intf);
2354        struct ub_lun *lun;
2355        unsigned long flags;
2356
2357        /*
2358         * Prevent ub_bd_release from pulling the rug from under us.
2359         * XXX This is starting to look like a kref.
2360         * XXX Why not take this ref at probe time?
2361         */
2362        spin_lock_irqsave(&ub_lock, flags);
2363        sc->openc++;
2364        spin_unlock_irqrestore(&ub_lock, flags);
2365
2366        /*
2367         * Fence stall clearings, operations triggered by unlinkings and so on.
2368         * We do not attempt to unlink any URBs, because we do not trust the
2369         * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2370         */
2371        atomic_set(&sc->poison, 1);
2372
2373        /*
2374         * Wait for reset to end, if any.
2375         */
2376        wait_event(sc->reset_wait, !sc->reset);
2377
2378        /*
2379         * Blow away queued commands.
2380         *
2381         * Actually, this never works, because before we get here
2382         * the HCD terminates outstanding URB(s). It causes our
2383         * SCSI command queue to advance, commands fail to submit,
2384         * and the whole queue drains. So, we just use this code to
2385         * print warnings.
2386         */
2387        spin_lock_irqsave(sc->lock, flags);
2388        {
2389                struct ub_scsi_cmd *cmd;
2390                int cnt = 0;
2391                while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2392                        cmd->error = -ENOTCONN;
2393                        cmd->state = UB_CMDST_DONE;
2394                        ub_cmdq_pop(sc);
2395                        (*cmd->done)(sc, cmd);
2396                        cnt++;
2397                }
2398                if (cnt != 0) {
2399                        printk(KERN_WARNING "%s: "
2400                            "%d commands were queued after shutdown\n", sc->name, cnt);
2401                }
2402        }
2403        spin_unlock_irqrestore(sc->lock, flags);
2404
2405        /*
2406         * Unregister the upper layer.
2407         */
2408        list_for_each_entry(lun, &sc->luns, link) {
2409                del_gendisk(lun->disk);
2410                /*
2411                 * I wish I could do:
2412                 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
2413                 * As it is, we rely on our internal poisoning and let
2414                 * the upper levels spin furiously, failing all the I/O.
2415                 */
2416        }
2417
2418        /*
2419         * Testing for -EINPROGRESS is always a bug, so we are bending
2420         * the rules a little.
2421         */
2422        spin_lock_irqsave(sc->lock, flags);
2423        if (sc->work_urb.status == -EINPROGRESS) {      /* janitors: ignore */
2424                printk(KERN_WARNING "%s: "
2425                    "URB is active after disconnect\n", sc->name);
2426        }
2427        spin_unlock_irqrestore(sc->lock, flags);
2428
2429        /*
2430         * There is virtually no chance that another CPU still runs a timeout
2431         * this long after ub_urb_complete should have called del_timer, as
2432         * long as the HCD did not forget to deliver the callback on unlink.
2433         */
2434        del_timer_sync(&sc->work_timer);
2435
2436        /*
2437         * At this point there must be no commands coming from anyone
2438         * and no URBs left in transit.
2439         */
2440
2441        ub_put(sc);
2442}
2443
2444static struct usb_driver ub_driver = {
2445        .name =         "ub",
2446        .probe =        ub_probe,
2447        .disconnect =   ub_disconnect,
2448        .id_table =     ub_usb_ids,
2449        .pre_reset =    ub_pre_reset,
2450        .post_reset =   ub_post_reset,
2451};
2452
2453static int __init ub_init(void)
2454{
2455        int rc;
2456        int i;
2457
2458        for (i = 0; i < UB_QLOCK_NUM; i++)
2459                spin_lock_init(&ub_qlockv[i]);
2460
2461        if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2462                goto err_regblkdev;
2463
2464        if ((rc = usb_register(&ub_driver)) != 0)
2465                goto err_register;
2466
2467        usb_usual_set_present(USB_US_TYPE_UB);
2468        return 0;
2469
2470err_register:
2471        unregister_blkdev(UB_MAJOR, DRV_NAME);
2472err_regblkdev:
2473        return rc;
2474}
2475
2476static void __exit ub_exit(void)
2477{
2478        usb_deregister(&ub_driver);
2479
2480        unregister_blkdev(UB_MAJOR, DRV_NAME);
2481        usb_usual_clear_present(USB_US_TYPE_UB);
2482}
2483
2484module_init(ub_init);
2485module_exit(ub_exit);
2486
2487MODULE_LICENSE("GPL");
2488