linux/drivers/block/ub.c
/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
 *
 * This work is a part of Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 *  -- Return sense now that rq allows it (we always auto-sense anyway).
 *  -- set readonly flag for CDs, set removable flag for CF readers
 *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 *  -- verify the 13 conditions and do bulk resets
 *  -- highmem
 *  -- move top_sense and work_bcs into separate allocations (if they survive)
 *     for cache purists and esoteric architectures.
 *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
 *  -- prune comments, they are too voluminous
 *  -- Resolve XXX's
 *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180

/*
 * The command state machine is the key model for understanding this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *       +--------+
 *       ! INIT   !
 *       +--------+
 *           !
 *        ub_scsi_cmd_start fails ->--------------------------------------\
 *           !                                                            !
 *           V                                                            !
 *       +--------+                                                       !
 *       ! CMD    !                                                       !
 *       +--------+                                                       !
 *           !                                            +--------+      !
 *         was -EPIPE -->-------------------------------->! CLEAR  !      !
 *           !                                            +--------+      !
 *           !                                                !           !
 *         was error -->------------------------------------- ! --------->\
 *           !                                                !           !
 *  /--<-- cmd->dir == NONE ?                                 !           !
 *  !        !                                                !           !
 *  !        V                                                !           !
 *  !    +--------+                                           !           !
 *  !    ! DATA   !                                           !           !
 *  !    +--------+                                           !           !
 *  !        !                           +---------+          !           !
 *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
 *  !        !                           +---------+          !           !
 *  !        !                                !               !           !
 *  !        !                              was error -->---- ! --------->\
 *  !      was error -->--------------------- ! ------------- ! --------->\
 *  !        !                                !               !           !
 *  !        V                                !               !           !
 *  \--->+--------+                           !               !           !
 *       ! STAT   !<--------------------------/               !           !
 *  /--->+--------+                                           !           !
 *  !        !                                                !           !
 * [C]     was -EPIPE -->-----------\                         !           !
 *  !        !                      !                         !           !
 *  +<---- len == 0                 !                         !           !
 *  !        !                      !                         !           !
 *  !      was error -->--------------------------------------!---------->\
 *  !        !                      !                         !           !
 *  +<---- bad CSW                  !                         !           !
 *  +<---- bad tag                  !                         !           !
 *  !        !                      V                         !           !
 *  !        !                 +--------+                     !           !
 *  !        !                 ! CLRRS  !                     !           !
 *  !        !                 +--------+                     !           !
 *  !        !                      !                         !           !
 *  \------- ! --------------------[C]--------\               !           !
 *           !                                !               !           !
 *         cmd->error---\                +--------+           !           !
 *           !          +--------------->! SENSE  !<----------/           !
 *         STAT_FAIL----/                +--------+                       !
 *           !                                !                           V
 *           !                                V                      +--------+
 *           \--------------------------------\--------------------->! DONE   !
 *                                                                   +--------+
 */

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

/*
 */

#define UB_PARTS_PER_LUN      8

#define UB_MAX_CDB_SIZE      16         /* Corresponds to Bulk */

#define UB_SENSE_SIZE  18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
        __le32  Signature;              /* contains 'USBC' */
        u32     Tag;                    /* unique per command id */
        __le32  DataTransferLength;     /* size of data */
        u8      Flags;                  /* direction in bit 0 */
        u8      Lun;                    /* LUN */
        u8      Length;                 /* of the CDB */
        u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
};

#define US_BULK_CB_WRAP_LEN     31
#define US_BULK_CB_SIGN         0x43425355      /* spells out 'USBC' */
#define US_BULK_FLAG_IN         1
#define US_BULK_FLAG_OUT        0
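
/*
 * Illustration (editorial, not from the original source): a CBW for
 * READ(10) of 8 sectors at LBA 0, LUN 0, tag 42 would look like this
 * on the wire (hypothetical values; little-endian host assumed for Tag):
 *
 *   bytes  0-3   Signature           55 53 42 43   ('USBC')
 *   bytes  4-7   Tag                 2a 00 00 00
 *   bytes  8-11  DataTransferLength  00 10 00 00   (4096 bytes)
 *   byte  12     Flags               80            (0x80 = IN, cf.
 *                                                   ub_scsi_cmd_start)
 *   byte  13     Lun                 00
 *   byte  14     Length              0a            (10-byte CDB)
 *   bytes 15-30  CDB                 28 00 00 00 00 00 00 00 08 00,
 *                                    zero-padded to UB_MAX_CDB_SIZE
 */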

/* command status wrapper */
struct bulk_cs_wrap {
        __le32  Signature;              /* should = 'USBS' */
        u32     Tag;                    /* same as original command */
        __le32  Residue;                /* amount not transferred */
        u8      Status;                 /* see below */
};

#define US_BULK_CS_WRAP_LEN     13
#define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
#define US_BULK_STAT_OK         0
#define US_BULK_STAT_FAIL       1
#define US_BULK_STAT_PHASE      2
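
/*
 * Illustration (editorial): the CSW answering the CBW above, reporting
 * a full transfer with good status, is the 13 bytes
 *   55 53 42 53  2a 00 00 00  00 00 00 00  00
 * i.e. Signature 'USBS', the echoed tag, Residue 0, Status OK.
 * Status 1 (FAIL) routes the state machine to SENSE and status 2
 * (PHASE) is a fatal Bad End; see the switch in ub_scsi_urb_compl().
 */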

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST   0xff
#define US_BULK_GET_MAX_LUN     0xfe

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT  (HZ*2)
#define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE     0
#define UB_DIR_READ     1
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE    3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
                         (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
        UB_CMDST_INIT,                  /* Initial state */
        UB_CMDST_CMD,                   /* Command submitted */
        UB_CMDST_DATA,                  /* Data phase */
        UB_CMDST_CLR2STS,               /* Clearing before requesting status */
        UB_CMDST_STAT,                  /* Status phase */
        UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
        UB_CMDST_CLRRS,                 /* Clearing before retrying status */
        UB_CMDST_SENSE,                 /* Sending Request Sense */
        UB_CMDST_DONE                   /* Final state */
};

struct ub_scsi_cmd {
        unsigned char cdb[UB_MAX_CDB_SIZE];
        unsigned char cdb_len;

        unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
        enum ub_scsi_cmd_state state;
        unsigned int tag;
        struct ub_scsi_cmd *next;

        int error;                      /* Return code - valid upon done */
        unsigned int act_len;           /* Return size */
        unsigned char key, asc, ascq;   /* May be valid if error==-EIO */

        int stat_count;                 /* Retries getting status. */
        unsigned int timeo;             /* jiffies until rq->timeout changes */

        unsigned int len;               /* Requested length */
        unsigned int current_sg;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];

        struct ub_lun *lun;
        void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
        void *back;
};

struct ub_request {
        struct request *rq;
        unsigned int current_try;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
        unsigned long nsec;             /* Linux size - 512 byte sectors */
        unsigned int bsize;             /* Linux hardsect_size */
        unsigned int bshift;            /* Shift between 512 and hard sects */
};
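
/*
 * Worked example (editorial): a CD-ROM with 2048-byte hardware sectors
 * has bsize = 2048 and bshift = 2, because 2048 == 512 << 2. A device
 * of N hardware sectors thus has nsec = N << 2 in Linux 512-byte
 * sectors, and block-layer sector numbers are converted to device
 * blocks by shifting right by bshift, as ub_cmd_build_block() does.
 */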

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes struct instead of pointer to struct
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
        unsigned int done;
        spinlock_t lock;
};

static DEFINE_MUTEX(ub_mutex);
static inline void ub_init_completion(struct ub_completion *x)
{
        x->done = 0;
        spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)   ((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
        unsigned long flags;

        spin_lock_irqsave(&x->lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&x->lock, flags);
        ret = x->done;
        spin_unlock_irqrestore(&x->lock, flags);
        return ret;
}
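
/*
 * Usage sketch (editorial): work_done is re-armed before every URB
 * submission and polled, never waited upon:
 *
 *      UB_INIT_COMPLETION(sc->work_done);
 *      usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
 *      ...
 *      if (!ub_is_completed(&sc->work_done))
 *              return;         // URB still in flight, try again later
 *
 * The URB callback calls ub_complete(); see ub_scsi_cmd_start(),
 * ub_urb_complete() and the polling in ub_scsi_dispatch().
 */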

/*
 */
struct ub_scsi_cmd_queue {
        int qlen, qmax;
        struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
        struct ub_dev *udev;
        struct list_head link;
        struct gendisk *disk;
        int id;                         /* Host index */
        int num;                        /* LUN number */
        char name[16];

        int changed;                    /* Media was changed */
        int removable;
        int readonly;

        struct ub_request urq;

        /* Use Ingo's mempool if or when we have more than one command. */
        /*
         * Currently we never need more than one command for the whole device.
         * However, giving every LUN a command is a cheap and automatic way
         * to enforce fairness between them.
         */
        int cmda[1];
        struct ub_scsi_cmd cmdv[1];

        struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
        spinlock_t *lock;
        atomic_t poison;                /* The USB device is disconnected */
        int openc;                      /* protected by ub_lock! */
                                        /* kref is too implicit for our taste */
        int reset;                      /* Reset is running */
        int bad_resid;
        unsigned int tagcnt;
        char name[12];
        struct usb_device *dev;
        struct usb_interface *intf;

        struct list_head luns;

        unsigned int send_bulk_pipe;    /* cached pipe values */
        unsigned int recv_bulk_pipe;
        unsigned int send_ctrl_pipe;
        unsigned int recv_ctrl_pipe;

        struct tasklet_struct tasklet;

        struct ub_scsi_cmd_queue cmd_queue;
        struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
        unsigned char top_sense[UB_SENSE_SIZE];

        struct ub_completion work_done;
        struct urb work_urb;
        struct timer_list work_timer;
        int last_pipe;                  /* What might need clearing */
        __le32 signature;               /* Learned signature */
        struct bulk_cb_wrap work_bcb;
        struct bulk_cs_wrap work_bcs;
        struct usb_ctrlrequest work_cr;

        struct work_struct reset_work;
        wait_queue_head_t reset_wait;
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  usb_storage_usb_ids
#else

static const struct usb_device_id ub_usb_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
        { }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ub_lock, flags);
        for (i = 0; i < UB_MAX_HOSTS; i++) {
                if (ub_hostv[i] == 0) {
                        ub_hostv[i] = 1;
                        spin_unlock_irqrestore(&ub_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&ub_lock, flags);
        return -1;
}

static void ub_id_put(int id)
{
        unsigned long flags;

        if (id < 0 || id >= UB_MAX_HOSTS) {
                printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
                return;
        }

        spin_lock_irqsave(&ub_lock, flags);
        if (ub_hostv[id] == 0) {
                spin_unlock_irqrestore(&ub_lock, flags);
                printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
                return;
        }
        ub_hostv[id] = 0;
        spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have lifetime issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
        unsigned long flags;
        spinlock_t *ret;

        spin_lock_irqsave(&ub_lock, flags);
        ret = &ub_qlockv[ub_qlock_next];
        ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
        spin_unlock_irqrestore(&ub_lock, flags);
        return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
        unsigned long flags;

        spin_lock_irqsave(&ub_lock, flags);
        --sc->openc;
        if (sc->openc == 0 && atomic_read(&sc->poison)) {
                spin_unlock_irqrestore(&ub_lock, flags);
                ub_cleanup(sc);
        } else {
                spin_unlock_irqrestore(&ub_lock, flags);
        }
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
        struct list_head *p;
        struct ub_lun *lun;
        struct request_queue *q;

        while (!list_empty(&sc->luns)) {
                p = sc->luns.next;
                lun = list_entry(p, struct ub_lun, link);
                list_del(p);

                /* I don't think queue can be NULL. But... Stolen from sx8.c */
                if ((q = lun->disk->queue) != NULL)
                        blk_cleanup_queue(q);
                /*
                 * If we zero disk->private_data BEFORE put_disk, we have
                 * to check for NULL all over the place in open, release,
                 * check_media and revalidate, because the block level
                 * semaphore is well inside the put_disk.
                 * But we cannot zero after the call, because *disk is gone.
                 * The sd.c is blatantly racy in this area.
                 */
                /* disk->private_data = NULL; */
                put_disk(lun->disk);
                lun->disk = NULL;

                ub_id_put(lun->id);
                kfree(lun);
        }

        usb_set_intfdata(sc->intf, NULL);
        usb_put_intf(sc->intf);
        usb_put_dev(sc->dev);
        kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
        struct ub_scsi_cmd *ret;

        if (lun->cmda[0])
                return NULL;
        ret = &lun->cmdv[0];
        lun->cmda[0] = 1;
        return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
        if (cmd != &lun->cmdv[0]) {
                printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
                    lun->name, cmd);
                return;
        }
        if (!lun->cmda[0]) {
                printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
                return;
        }
        lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                t->tail->next = cmd;
                t->tail = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                cmd->next = t->head;
                t->head = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
        struct ub_scsi_cmd *cmd;

        if (t->qlen == 0)
                return NULL;
        if (--t->qlen == 0)
                t->tail = NULL;
        cmd = t->head;
        t->head = cmd->next;
        cmd->next = NULL;
        return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
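
/*
 * Note (editorial): this is a plain singly linked FIFO with one twist -
 * ub_cmdq_insert() pushes at the head, and its sole caller is
 * ub_state_sense(), so that the auto-issued REQUEST SENSE is dispatched
 * before the command that triggered it is finally completed.
 */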

/*
 * The request function is our main entry point
 */

static void ub_request_fn(struct request_queue *q)
{
        struct ub_lun *lun = q->queuedata;
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (ub_request_fn_1(lun, rq) != 0) {
                        blk_stop_queue(q);
                        break;
                }
        }
}
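
/*
 * Note (editorial): a nonzero return from ub_request_fn_1() leaves the
 * request at the head of the queue and stops the queue. It is restarted
 * by blk_start_queue() in ub_rw_cmd_done() when the in-flight command
 * releases the per-LUN ub_scsi_cmd, so the deferred request is retried.
 */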

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
        struct ub_dev *sc = lun->udev;
        struct ub_scsi_cmd *cmd;
        struct ub_request *urq;
        int n_elem;

        if (atomic_read(&sc->poison)) {
                blk_start_request(rq);
                ub_end_rq(rq, DID_NO_CONNECT << 16);
                return 0;
        }

        if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
                blk_start_request(rq);
                ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
                return 0;
        }

        if (lun->urq.rq != NULL)
                return -1;
        if ((cmd = ub_get_cmd(lun)) == NULL)
                return -1;
        memset(cmd, 0, sizeof(struct ub_scsi_cmd));

        blk_start_request(rq);

        urq = &lun->urq;
        memset(urq, 0, sizeof(struct ub_request));
        urq->rq = rq;

        /*
         * get scatterlist from block layer
         */
        sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
        n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
        if (n_elem < 0) {
                /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
                printk(KERN_INFO "%s: failed request map (%d)\n",
                    lun->name, n_elem);
                goto drop;
        }
        if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
                printk(KERN_WARNING "%s: request with %d segments\n",
                    lun->name, n_elem);
                goto drop;
        }
        urq->nsg = n_elem;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                ub_cmd_build_packet(sc, lun, cmd, urq);
        } else {
                ub_cmd_build_block(sc, lun, cmd, urq);
        }
        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;
        if (ub_submit_scsi(sc, cmd) != 0)
                goto drop;

        return 0;

drop:
        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, DID_ERROR << 16);
        return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;
        unsigned int block, nblks;

        if (rq_data_dir(rq) == WRITE)
                cmd->dir = UB_DIR_WRITE;
        else
                cmd->dir = UB_DIR_READ;

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        /*
         * build the command
         *
         * The call to blk_queue_logical_block_size() guarantees that request
         * is aligned, but it is given in terms of 512 byte units, always.
         */
        block = blk_rq_pos(rq) >> lun->capacity.bshift;
        nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;

        cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
        /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
        cmd->cdb[2] = block >> 24;
        cmd->cdb[3] = block >> 16;
        cmd->cdb[4] = block >> 8;
        cmd->cdb[5] = block;
        cmd->cdb[7] = nblks >> 8;
        cmd->cdb[8] = nblks;
        cmd->cdb_len = 10;

        cmd->len = blk_rq_bytes(rq);
}
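
/*
 * Worked example (editorial, hypothetical values): on a LUN with
 * bsize 2048 (bshift 2), a 16 KB read at byte offset 64 KB arrives as
 * blk_rq_pos() == 128 and blk_rq_sectors() == 32 (512-byte units),
 * giving block = 128 >> 2 = 32 and nblks = 32 >> 2 = 8. The CDB built
 * above is then 28 00 00 00 00 20 00 00 08 00: READ(10) of 8 blocks
 * at LBA 0x20, with cmd->len = 16384.
 */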

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;

        if (blk_rq_bytes(rq) == 0) {
                cmd->dir = UB_DIR_NONE;
        } else {
                if (rq_data_dir(rq) == WRITE)
                        cmd->dir = UB_DIR_WRITE;
                else
                        cmd->dir = UB_DIR_READ;
        }

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
        cmd->cdb_len = rq->cmd_len;

        cmd->len = blk_rq_bytes(rq);

        /*
         * To reapply this to every URB is not as incorrect as it looks.
         * In return, we avoid any complicated tracking calculations.
         */
        cmd->timeo = rq->timeout;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_lun *lun = cmd->lun;
        struct ub_request *urq = cmd->back;
        struct request *rq;
        unsigned int scsi_status;

        rq = urq->rq;

        if (cmd->error == 0) {
                if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                        if (cmd->act_len >= rq->resid_len)
                                rq->resid_len = 0;
                        else
                                rq->resid_len -= cmd->act_len;
                        scsi_status = 0;
                } else {
                        if (cmd->act_len != cmd->len) {
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        } else {
                                scsi_status = 0;
                        }
                }
        } else {
                if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                        /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
                        memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
                        rq->sense_len = UB_SENSE_SIZE;
                        if (sc->top_sense[0] != 0)
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        else
                                scsi_status = DID_ERROR << 16;
                } else {
                        if (cmd->error == -EIO &&
                            (cmd->key == 0 ||
                             cmd->key == MEDIUM_ERROR ||
                             cmd->key == UNIT_ATTENTION)) {
                                if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
                                        return;
                        }
                        scsi_status = SAM_STAT_CHECK_CONDITION;
                }
        }

        urq->rq = NULL;

        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, scsi_status);
        blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
        int error;

        if (scsi_status == 0) {
                error = 0;
        } else {
                error = -EIO;
                rq->errors = scsi_status;
        }
        __blk_end_request_all(rq, error);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

        if (atomic_read(&sc->poison))
                return -ENXIO;

        ub_reset_enter(sc, urq->current_try);

        if (urq->current_try >= 3)
                return -EIO;
        urq->current_try++;

        /* Remove this if anyone complains of flooding. */
        printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
            "[sense %x %02x %02x] retry %d\n",
            sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
            cmd->key, cmd->asc, cmd->ascq, urq->current_try);

        memset(cmd, 0, sizeof(struct ub_scsi_cmd));
        ub_cmd_build_block(sc, lun, cmd, urq);

        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
        return ub_submit_scsi(sc, cmd);
#else
        ub_cmdq_add(sc, cmd);
        return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (cmd->state != UB_CMDST_INIT ||
            (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
                return -EINVAL;
        }

        ub_cmdq_add(sc, cmd);
        /*
         * We can call ub_scsi_dispatch(sc) right away here, but it's a little
         * safer to jump to a tasklet, in case upper layers do something silly.
         */
        tasklet_schedule(&sc->tasklet);
        return 0;
}
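
/*
 * Caller sketch (editorial) for the Iron Law above: on nonzero return
 * the caller still owns the command and must dispose of it; on zero
 * the callback owns the completion path and cmd must not be touched.
 *
 *      if (ub_submit_scsi(sc, cmd) != 0) {
 *              ub_put_cmd(lun, cmd);           // callback will NOT run
 *              ub_end_rq(rq, DID_ERROR << 16);
 *      }
 *
 * This mirrors the "goto drop" handling in ub_request_fn_1().
 */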

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct bulk_cb_wrap *bcb;
        int rc;

        bcb = &sc->work_bcb;

        /*
         * ``If the allocation length is eighteen or greater, and a device
         * server returns less than eighteen bytes of data, the application
         * client should assume that the bytes not transferred would have been
         * zeroes had the device server returned those bytes.''
         *
         * We zero sense for all commands so that when a packet request
         * fails it does not return a stale sense.
         */
        memset(&sc->top_sense, 0, UB_SENSE_SIZE);

        /* set up the command wrapper */
        bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
        bcb->Tag = cmd->tag;            /* Endianness is not important */
        bcb->DataTransferLength = cpu_to_le32(cmd->len);
        bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
        bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
        bcb->Length = cmd->cdb_len;

        /* copy the command payload */
        memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->send_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
            bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_CMD;
        return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
        struct ub_dev *sc = (struct ub_dev *) arg;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        if (!ub_is_completed(&sc->work_done))
                usb_unlink_urb(&sc->work_urb);
        spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
        struct ub_dev *sc = urb->context;

        ub_complete(&sc->work_done);
        tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
        struct ub_dev *sc = (struct ub_dev *) _dev;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        ub_scsi_dispatch(sc);
        spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
        struct ub_scsi_cmd *cmd;
        int rc;

        while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
                if (cmd->state == UB_CMDST_DONE) {
                        ub_cmdq_pop(sc);
                        (*cmd->done)(sc, cmd);
                } else if (cmd->state == UB_CMDST_INIT) {
                        if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
                                break;
                        cmd->error = rc;
                        cmd->state = UB_CMDST_DONE;
                } else {
                        if (!ub_is_completed(&sc->work_done))
                                break;
                        del_timer(&sc->work_timer);
                        ub_scsi_urb_compl(sc, cmd);
                }
        }
}
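
/*
 * Note (editorial): the dispatch loop enforces the single-URB design.
 * Only the command at the head of the queue ever owns work_urb; once
 * the head command is past INIT, the loop merely polls work_done and
 * bails out until the URB completion reschedules the tasklet.
 */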

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct urb *urb = &sc->work_urb;
        struct bulk_cs_wrap *bcs;
        int endp;
        int len;
        int rc;

        if (atomic_read(&sc->poison)) {
                ub_state_done(sc, cmd, -ENODEV);
                return;
        }

        endp = usb_pipeendpoint(sc->last_pipe);
        if (usb_pipein(sc->last_pipe))
                endp |= USB_DIR_IN;

        if (cmd->state == UB_CMDST_CLEAR) {
                if (urb->status == -EPIPE) {
                        /*
                         * STALL while clearing STALL.
                         * The control pipe clears itself - nothing to do.
                         */
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_sense(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLR2STS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLRRS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                usb_reset_endpoint(sc->dev, endp);

                ub_state_stat_counted(sc, cmd);

        } else if (cmd->state == UB_CMDST_CMD) {
                switch (urb->status) {
                case 0:
                        break;
                case -EOVERFLOW:
                        goto Bad_End;
                case -EPIPE:
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                /*
                                 * This is typically ENOMEM or some other such shit.
                                 * Retrying is pointless. Just do Bad End on it...
                                 */
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLEAR;
                        return;
                case -ESHUTDOWN:        /* unplug */
                case -EILSEQ:           /* unplug timeout on uhci */
                        ub_state_done(sc, cmd, -ENODEV);
                        return;
                default:
                        goto Bad_End;
                }
                if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
                        goto Bad_End;
                }

                if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
                        ub_state_stat(sc, cmd);
                        return;
                }

                // udelay(125);         // usb-storage has this
                ub_data_start(sc, cmd);

        } else if (cmd->state == UB_CMDST_DATA) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLR2STS;
                        return;
                }
                if (urb->status == -EOVERFLOW) {
                        /*
                         * A babble? Failure, but we must transfer CSW now.
                         */
                        cmd->error = -EOVERFLOW;        /* A cheap trick... */
                        ub_state_stat(sc, cmd);
                        return;
                }

                if (cmd->dir == UB_DIR_WRITE) {
                        /*
                         * Do not continue writes in case of a failure.
                         * Doing so would cause sectors to be mixed up,
                         * which is worse than sectors lost.
                         *
                         * We must try to read the CSW, or many devices
                         * get confused.
                         */
                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                cmd->act_len += len;

                                cmd->error = -EIO;
                                ub_state_stat(sc, cmd);
                                return;
                        }

                } else {
                        /*
                         * If an error occurs on read, we record it, and
                         * continue to fetch data in order to avoid a bubble.
                         *
                         * As a small shortcut, we stop if we detect that
                         * a CSW is mixed into the data.
                         */
                        if (urb->status != 0)
                                cmd->error = -EIO;

                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
                                        goto Bad_End;
                        }
                }

                cmd->act_len += urb->actual_length;

                if (++cmd->current_sg < cmd->nsg) {
                        ub_data_start(sc, cmd);
                        return;
                }
                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_STAT) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }

                        /*
                         * Having a stall when getting CSW is an error, so
                         * make sure upper levels are not oblivious to it.
                         */
                        cmd->error = -EIO;              /* A cheap trick... */

                        cmd->state = UB_CMDST_CLRRS;
                        return;
                }

                /* Catch everything, including -EOVERFLOW and other nasties. */
                if (urb->status != 0)
                        goto Bad_End;

                if (urb->actual_length == 0) {
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                /*
                 * Check the returned Bulk protocol status.
                 * The status block has to be validated first.
                 */

                bcs = &sc->work_bcs;

                if (sc->signature == cpu_to_le32(0)) {
                        /*
                         * This is the first reply, so do not perform the check.
                         * Instead, remember the signature the device uses
                         * for future checks. But do not allow a nul.
                         */
                        sc->signature = bcs->Signature;
                        if (sc->signature == cpu_to_le32(0)) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                } else {
                        if (bcs->Signature != sc->signature) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                }

                if (bcs->Tag != cmd->tag) {
                        /*
                         * This usually happens when we disagree with the
                         * device's microcode about something. For instance,
                         * a few of them throw this after timeouts. They buffer
                         * commands and reply to commands we timed out before.
                         * Without flushing these replies we loop forever.
                         */
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                if (!sc->bad_resid) {
                        len = le32_to_cpu(bcs->Residue);
                        if (len != cmd->len - cmd->act_len) {
                                /*
                                 * Only start ignoring if this cmd ended well.
                                 */
                                if (cmd->len == cmd->act_len) {
                                        printk(KERN_NOTICE "%s: "
                                            "bad residual %d of %d, ignoring\n",
                                            sc->name, len, cmd->len);
                                        sc->bad_resid = 1;
                                }
                        }
                }

                switch (bcs->Status) {
                case US_BULK_STAT_OK:
                        break;
                case US_BULK_STAT_FAIL:
                        ub_state_sense(sc, cmd);
                        return;
                case US_BULK_STAT_PHASE:
                        goto Bad_End;
                default:
                        printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
                            sc->name, bcs->Status);
                        ub_state_done(sc, cmd, -EINVAL);
                        return;
                }

                /* Not zeroing error to preserve a babble indicator */
                if (cmd->error != 0) {
                        ub_state_sense(sc, cmd);
                        return;
                }
                cmd->state = UB_CMDST_DONE;
                ub_cmdq_pop(sc);
                (*cmd->done)(sc, cmd);

        } else if (cmd->state == UB_CMDST_SENSE) {
                ub_state_done(sc, cmd, -EIO);

        } else {
                printk(KERN_WARNING "%s: wrong command state %d\n",
                    sc->name, cmd->state);
                ub_state_done(sc, cmd, -EINVAL);
                return;
        }
        return;

Bad_End: /* Little Excel is dead */
        ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
        int pipe;
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        if (cmd->dir == UB_DIR_READ)
                pipe = sc->recv_bulk_pipe;
        else
                pipe = sc->send_bulk_pipe;
        sc->last_pipe = pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
            sg->length, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_DATA;
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

        cmd->error = rc;
        cmd->state = UB_CMDST_DONE;
        ub_cmdq_pop(sc);
        (*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->recv_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
            &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return -1;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->stat_count = 0;
        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (++cmd->stat_count >= 4) {
                ub_state_sense(sc, cmd);
                return;
        }

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->state = UB_CMDST_STAT;
}
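
/*
 * Note (editorial): stat_count bounds the [C] loop of the state
 * diagram. ub_state_stat() zeroes it on a normal entry into STAT;
 * every re-entry through ub_state_stat_counted() (zero-length CSW,
 * bad signature, bad tag, or the retry after CLRRS) increments it,
 * and the fourth attempt gives up and falls through to SENSE.
 */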

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd *scmd;
        struct scatterlist *sg;
        int rc;

        if (cmd->cdb[0] == REQUEST_SENSE) {
                rc = -EPIPE;
                goto error;
        }

        scmd = &sc->top_rqs_cmd;
        memset(scmd, 0, sizeof(struct ub_scsi_cmd));
        scmd->cdb[0] = REQUEST_SENSE;
        scmd->cdb[4] = UB_SENSE_SIZE;
        scmd->cdb_len = 6;
        scmd->dir = UB_DIR_READ;
        scmd->state = UB_CMDST_INIT;
        scmd->nsg = 1;
        sg = &scmd->sgv[0];
        sg_init_table(sg, UB_MAX_REQ_SG);
        sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
                        (unsigned long)sc->top_sense & (PAGE_SIZE-1));
        scmd->len = UB_SENSE_SIZE;
        scmd->lun = cmd->lun;
        scmd->done = ub_top_sense_done;
        scmd->back = cmd;

        scmd->tag = sc->tagcnt++;

        cmd->state = UB_CMDST_SENSE;

        ub_cmdq_insert(sc, scmd);
        return;

error:
        ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
        int endp;
        struct usb_ctrlrequest *cr;
        int rc;

        endp = usb_pipeendpoint(stalled_pipe);
        if (usb_pipein(stalled_pipe))
                endp |= USB_DIR_IN;

        cr = &sc->work_cr;
        cr->bRequestType = USB_RECIP_ENDPOINT;
        cr->bRequest = USB_REQ_CLEAR_FEATURE;
        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
        cr->wIndex = cpu_to_le16(endp);
        cr->wLength = cpu_to_le16(0);

        UB_INIT_COMPLETION(sc->work_done);

        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
            (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}
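
/*
 * Note (editorial): this hand-rolls the standard
 * CLEAR_FEATURE(ENDPOINT_HALT) control transfer - in effect an
 * asynchronous analogue of usb_clear_halt() - because the state
 * machine may not block. Completion again lands in ub_urb_complete(),
 * and the command state (CLEAR, CLR2STS or CLRRS) tells
 * ub_scsi_urb_compl() how to proceed.
 */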
1501
1502/*
1503 */
1504static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1505{
1506        unsigned char *sense = sc->top_sense;
1507        struct ub_scsi_cmd *cmd;
1508
1509        /*
1510         * Find the command which triggered the unit attention or a check,
1511         * save the sense into it, and advance its state machine.
1512         */
1513        if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1514                printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1515                return;
1516        }
1517        if (cmd != scmd->back) {
1518                printk(KERN_WARNING "%s: "
1519                    "sense done for wrong command 0x%x\n",
1520                    sc->name, cmd->tag);
1521                return;
1522        }
1523        if (cmd->state != UB_CMDST_SENSE) {
1524                printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
1525                    sc->name, cmd->state);
1526                return;
1527        }
1528
1529        /*
1530         * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1531         */
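            /*
             * Fixed-format sense data: the key is the low nibble of byte 2,
             * the ASC and ASCQ are bytes 12 and 13.
             */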
1532        cmd->key = sense[2] & 0x0F;
1533        cmd->asc = sense[12];
1534        cmd->ascq = sense[13];
1535
1536        ub_scsi_urb_compl(sc, cmd);
1537}
1538
1539/*
1540 * Reset management
1541 */
1542
1543static void ub_reset_enter(struct ub_dev *sc, int try)
1544{
1545
1546        if (sc->reset) {
1547                /* This happens often on multi-LUN devices. */
1548                return;
1549        }
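            /*
             * Store try + 1, so that sc->reset doubles as a "reset pending"
             * flag. The low bit then selects the flavor in ub_reset_task:
             * an even value requests the soft Bulk-Only reset, an odd one
             * the full device reset.
             */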
1550        sc->reset = try + 1;
1551
1552#if 0 /* Not needed because the disconnect waits for us. */
1553        unsigned long flags;
1554        spin_lock_irqsave(&ub_lock, flags);
1555        sc->openc++;
1556        spin_unlock_irqrestore(&ub_lock, flags);
1557#endif
1558
1559#if 0 /* We let them stop themselves. */
1560        struct ub_lun *lun;
1561        list_for_each_entry(lun, &sc->luns, link) {
1562                blk_stop_queue(lun->disk->queue);
1563        }
1564#endif
1565
1566        schedule_work(&sc->reset_work);
1567}
1568
1569static void ub_reset_task(struct work_struct *work)
1570{
1571        struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1572        unsigned long flags;
1573        struct ub_lun *lun;
1574        int rc;
1575
1576        if (!sc->reset) {
1577                printk(KERN_WARNING "%s: Running reset unrequested\n",
1578                    sc->name);
1579                return;
1580        }
1581
1582        if (atomic_read(&sc->poison)) {
1583                ;
1584        } else if ((sc->reset & 1) == 0) {
1585                ub_sync_reset(sc);
1586                msleep(700);    /* usb-storage sleeps 6s (!) */
1587                ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1588                ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1589        } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1590                ;
1591        } else {
1592                rc = usb_lock_device_for_reset(sc->dev, sc->intf);
1593                if (rc < 0) {
1594                        printk(KERN_NOTICE
1595                            "%s: usb_lock_device_for_reset failed (%d)\n",
1596                            sc->name, rc);
1597                } else {
1598                        rc = usb_reset_device(sc->dev);
1599                        if (rc < 0) {
1600                                printk(KERN_NOTICE "%s: "
1601                                    "usb_reset_device failed (%d)\n",
1602                                    sc->name, rc);
1603                        }
1604                        usb_unlock_device(sc->dev);
1605                }
1606        }
1607
1608        /*
1609         * In theory, no commands can be running while reset is active,
1610         * so nobody can ask for another reset, and so we do not need any
1611         * queues of resets or anything. We do need a spinlock though,
1612         * to interact with the block layer.
1613         */
1614        spin_lock_irqsave(sc->lock, flags);
1615        sc->reset = 0;
1616        tasklet_schedule(&sc->tasklet);
1617        list_for_each_entry(lun, &sc->luns, link) {
1618                blk_start_queue(lun->disk->queue);
1619        }
1620        wake_up(&sc->reset_wait);
1621        spin_unlock_irqrestore(sc->lock, flags);
1622}
1623
1624/*
1625 * XXX Reset brackets are too much hassle to implement, so just stub them
1626 * in order to prevent forced unbinding (which deadlocks solid when our
1627 * ->disconnect method waits for the reset to complete and this kills keventd).
1628 *
1629 * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
1630 * or else the post_reset is invoked, and restarts I/O on a locked device.
1631 */
1632static int ub_pre_reset(struct usb_interface *iface) {
1633        return 0;
1634}
1635
1636static int ub_post_reset(struct usb_interface *iface) {
1637        return 0;
1638}
1639
1640/*
1641 * This is called from a process context.
1642 */
1643static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1644{
1645
1646        lun->readonly = 0;      /* XXX Query this from the device */
1647
1648        lun->capacity.nsec = 0;
1649        lun->capacity.bsize = 512;
1650        lun->capacity.bshift = 0;
1651
1652        if (ub_sync_tur(sc, lun) != 0)
1653                return;                 /* Not ready */
1654        lun->changed = 0;
1655
1656        if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1657                /*
1658                 * The retry here means something is wrong, either with the
1659                 * device, with the transport, or with our code.
1660                 * We keep this because sd.c has retries for capacity.
1661                 */
1662                if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1663                        lun->capacity.nsec = 0;
1664                        lun->capacity.bsize = 512;
1665                        lun->capacity.bshift = 0;
1666                }
1667        }
1668}
1669
1670/*
1671 * The open function.
1672 * This is mostly needed to keep refcounting, but also to support
1673 * media checks on removable media drives.
1674 */
1675static int ub_bd_open(struct block_device *bdev, fmode_t mode)
1676{
1677        struct ub_lun *lun = bdev->bd_disk->private_data;
1678        struct ub_dev *sc = lun->udev;
1679        unsigned long flags;
1680        int rc;
1681
1682        spin_lock_irqsave(&ub_lock, flags);
1683        if (atomic_read(&sc->poison)) {
1684                spin_unlock_irqrestore(&ub_lock, flags);
1685                return -ENXIO;
1686        }
1687        sc->openc++;
1688        spin_unlock_irqrestore(&ub_lock, flags);
1689
1690        if (lun->removable || lun->readonly)
1691                check_disk_change(bdev);
1692
1693        /*
1694         * sd.c considers ->media_present and ->changed not equivalent,
1695         * under some pretty murky conditions (a failure of READ CAPACITY).
1696         * We may need it one day.
1697         */
1698        if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
1699                rc = -ENOMEDIUM;
1700                goto err_open;
1701        }
1702
1703        if (lun->readonly && (mode & FMODE_WRITE)) {
1704                rc = -EROFS;
1705                goto err_open;
1706        }
1707
1708        return 0;
1709
1710err_open:
1711        ub_put(sc);
1712        return rc;
1713}
1714
1715static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
1716{
1717        int ret;
1718
1719        mutex_lock(&ub_mutex);
1720        ret = ub_bd_open(bdev, mode);
1721        mutex_unlock(&ub_mutex);
1722
1723        return ret;
1724}
1725
1726
1727/*
     * The release function: drop the reference taken in ub_bd_open.
1728 */
1729static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1730{
1731        struct ub_lun *lun = disk->private_data;
1732        struct ub_dev *sc = lun->udev;
1733
1734        mutex_lock(&ub_mutex);
1735        ub_put(sc);
1736        mutex_unlock(&ub_mutex);
1737
1738        return 0;
1739}
1740
1741/*
1742 * The ioctl interface.
1743 */
1744static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1745    unsigned int cmd, unsigned long arg)
1746{
1747        struct gendisk *disk = bdev->bd_disk;
1748        void __user *usermem = (void __user *) arg;
1749        int ret;
1750
1751        mutex_lock(&ub_mutex);
1752        ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1753        mutex_unlock(&ub_mutex);
1754
1755        return ret;
1756}
1757
1758/*
1759 * This is called by check_disk_change if we reported a media change.
1760 * The main objective here is to discover the features of the media such as
1761 * the capacity, read-only status, etc. USB storage generally does not
1762 * need to be spun up, but if we needed it, this would be the place.
1763 *
1764 * This call can sleep.
1765 *
1766 * The return code is not used.
1767 */
1768static int ub_bd_revalidate(struct gendisk *disk)
1769{
1770        struct ub_lun *lun = disk->private_data;
1771
1772        ub_revalidate(lun->udev, lun);
1773
1774        /* XXX Support sector size switching like in sr.c */
1775        blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
1776        set_capacity(disk, lun->capacity.nsec);
1777        // set_disk_ro(sdkp->disk, lun->readonly);
1778
1779        return 0;
1780}
1781
1782/*
1783 * The check is called by the block layer to verify if the media
1784 * is still available. It is supposed to be harmless, lightweight and
1785 * non-intrusive in case the media was not changed.
1786 *
1787 * This call can sleep.
1788 *
1789 * The return code is bool!
1790 */
1791static int ub_bd_media_changed(struct gendisk *disk)
1792{
1793        struct ub_lun *lun = disk->private_data;
1794
1795        if (!lun->removable)
1796                return 0;
1797
1798        /*
1799         * We always clear check conditions after every command, so this is
1800         * not as dangerous as it looks. If TEST_UNIT_READY fails here, the
1801         * device actually is not ready, and operator or software intervention
1802         * is required. One dangerous case is a drive which spins itself down:
1803         * come the time to write dirty pages, the writes fail and the block
1804         * layer discards the data. Since we never spin drives up, such
1805         * devices simply cannot be used with ub anyway.
1806         */
1807        if (ub_sync_tur(lun->udev, lun) != 0) {
1808                lun->changed = 1;
1809                return 1;
1810        }
1811
1812        return lun->changed;
1813}
1814
1815static const struct block_device_operations ub_bd_fops = {
1816        .owner          = THIS_MODULE,
1817        .open           = ub_bd_unlocked_open,
1818        .release        = ub_bd_release,
1819        .ioctl          = ub_bd_ioctl,
1820        .media_changed  = ub_bd_media_changed,
1821        .revalidate_disk = ub_bd_revalidate,
1822};
1823
1824/*
1825 * Common ->done routine for commands executed synchronously.
1826 */
1827static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1828{
1829        struct completion *cop = cmd->back;
1830        complete(cop);
1831}
1832
1833/*
1834 * Test if the device has a check condition on it, synchronously.
1835 */
1836static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1837{
1838        struct ub_scsi_cmd *cmd;
1839        enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1840        unsigned long flags;
1841        struct completion compl;
1842        int rc;
1843
1844        init_completion(&compl);
1845
1846        rc = -ENOMEM;
1847        if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1848                goto err_alloc;
1849
1850        cmd->cdb[0] = TEST_UNIT_READY;
1851        cmd->cdb_len = 6;
1852        cmd->dir = UB_DIR_NONE;
1853        cmd->state = UB_CMDST_INIT;
1854        cmd->lun = lun;                 /* This may be NULL, but that's ok */
1855        cmd->done = ub_probe_done;
1856        cmd->back = &compl;
1857
1858        spin_lock_irqsave(sc->lock, flags);
1859        cmd->tag = sc->tagcnt++;
1860
1861        rc = ub_submit_scsi(sc, cmd);
1862        spin_unlock_irqrestore(sc->lock, flags);
1863
1864        if (rc != 0)
1865                goto err_submit;
1866
1867        wait_for_completion(&compl);
1868
1869        rc = cmd->error;
1870
1871        if (rc == -EIO && cmd->key != 0)        /* Retries for benh's key */
1872                rc = cmd->key;
1873
1874err_submit:
1875        kfree(cmd);
1876err_alloc:
1877        return rc;
1878}
1879
1880/*
1881 * Read the SCSI capacity synchronously (for probing).
1882 */
1883static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1884    struct ub_capacity *ret)
1885{
1886        struct ub_scsi_cmd *cmd;
1887        struct scatterlist *sg;
1888        char *p;
1889        enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1890        unsigned long flags;
1891        unsigned int bsize, shift;
1892        unsigned long nsec;
1893        struct completion compl;
1894        int rc;
1895
1896        init_completion(&compl);
1897
1898        rc = -ENOMEM;
1899        if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1900                goto err_alloc;
1901        p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1902
1903        cmd->cdb[0] = 0x25;                     /* READ CAPACITY(10) */
1904        cmd->cdb_len = 10;
1905        cmd->dir = UB_DIR_READ;
1906        cmd->state = UB_CMDST_INIT;
1907        cmd->nsg = 1;
1908        sg = &cmd->sgv[0];
1909        sg_init_table(sg, UB_MAX_REQ_SG);
1910        sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
1911        cmd->len = 8;
1912        cmd->lun = lun;
1913        cmd->done = ub_probe_done;
1914        cmd->back = &compl;
1915
1916        spin_lock_irqsave(sc->lock, flags);
1917        cmd->tag = sc->tagcnt++;
1918
1919        rc = ub_submit_scsi(sc, cmd);
1920        spin_unlock_irqrestore(sc->lock, flags);
1921
1922        if (rc != 0)
1923                goto err_submit;
1924
1925        wait_for_completion(&compl);
1926
1927        if (cmd->error != 0) {
1928                rc = -EIO;
1929                goto err_read;
1930        }
1931        if (cmd->act_len != 8) {
1932                rc = -EIO;
1933                goto err_read;
1934        }
1935
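            /*
             * The READ CAPACITY(10) reply is 8 bytes: the big-endian LBA of
             * the last block, then the big-endian block length in bytes.
             * Hence the "+ 1" to turn the last LBA into a block count.
             */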
1936        /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1937        nsec = be32_to_cpu(*(__be32 *)p) + 1;
1938        bsize = be32_to_cpu(*(__be32 *)(p + 4));
1939        switch (bsize) {
1940        case 512:       shift = 0;      break;
1941        case 1024:      shift = 1;      break;
1942        case 2048:      shift = 2;      break;
1943        case 4096:      shift = 3;      break;
1944        default:
1945                rc = -EDOM;
1946                goto err_inv_bsize;
1947        }
1948
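            /*
             * Capacity is kept in 512-byte sectors; for example, 1000 blocks
             * of 4096 bytes yield nsec = 8000 with bshift = 3.
             */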
1949        ret->bsize = bsize;
1950        ret->bshift = shift;
1951        ret->nsec = nsec << shift;
1952        rc = 0;
1953
1954err_inv_bsize:
1955err_read:
1956err_submit:
1957        kfree(cmd);
1958err_alloc:
1959        return rc;
1960}
1961
1962/*
     * URB completion for the synchronous, probe-time control requests.
1963 */
1964static void ub_probe_urb_complete(struct urb *urb)
1965{
1966        struct completion *cop = urb->context;
1967        complete(cop);
1968}
1969
1970static void ub_probe_timeout(unsigned long arg)
1971{
1972        struct completion *cop = (struct completion *) arg;
1973        complete(cop);
1974}
1975
1976/*
1977 * Reset with a Bulk-Only Mass Storage Reset: a class-specific control
     * request directed at the interface. Per the Bulk-Only spec, recovery
     * continues by clearing HALT on both bulk pipes, which ub_reset_task
     * does after calling us.
1978 */
1979static int ub_sync_reset(struct ub_dev *sc)
1980{
1981        int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1982        struct usb_ctrlrequest *cr;
1983        struct completion compl;
1984        struct timer_list timer;
1985        int rc;
1986
1987        init_completion(&compl);
1988
1989        cr = &sc->work_cr;
1990        cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1991        cr->bRequest = US_BULK_RESET_REQUEST;
1992        cr->wValue = cpu_to_le16(0);
1993        cr->wIndex = cpu_to_le16(ifnum);
1994        cr->wLength = cpu_to_le16(0);
1995
1996        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1997            (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1998
1999        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2000                printk(KERN_WARNING
2001                     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
2002                return rc;
2003        }
2004
2005        init_timer(&timer);
2006        timer.function = ub_probe_timeout;
2007        timer.data = (unsigned long) &compl;
2008        timer.expires = jiffies + UB_CTRL_TIMEOUT;
2009        add_timer(&timer);
2010
2011        wait_for_completion(&compl);
2012
2013        del_timer_sync(&timer);
2014        usb_kill_urb(&sc->work_urb);
2015
2016        return sc->work_urb.status;
2017}
2018
2019/*
2020 * Get the number of LUNs by way of the Bulk Get Max LUN command.
2021 */
2022static int ub_sync_getmaxlun(struct ub_dev *sc)
2023{
2024        int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2025        unsigned char *p;
2026        enum { ALLOC_SIZE = 1 };
2027        struct usb_ctrlrequest *cr;
2028        struct completion compl;
2029        struct timer_list timer;
2030        int nluns;
2031        int rc;
2032
2033        init_completion(&compl);
2034
2035        rc = -ENOMEM;
2036        if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2037                goto err_alloc;
2038        *p = 55;        /* An arbitrary sentinel, to detect a bogus reply */
2039
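            /*
             * Get Max LUN is a class-specific, device-to-host control request
             * on the interface; its single data byte is the highest LUN
             * number, not the count of LUNs.
             */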
2040        cr = &sc->work_cr;
2041        cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2042        cr->bRequest = US_BULK_GET_MAX_LUN;
2043        cr->wValue = cpu_to_le16(0);
2044        cr->wIndex = cpu_to_le16(ifnum);
2045        cr->wLength = cpu_to_le16(1);
2046
2047        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2048            (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2049
2050        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2051                goto err_submit;
2052
2053        init_timer(&timer);
2054        timer.function = ub_probe_timeout;
2055        timer.data = (unsigned long) &compl;
2056        timer.expires = jiffies + UB_CTRL_TIMEOUT;
2057        add_timer(&timer);
2058
2059        wait_for_completion(&compl);
2060
2061        del_timer_sync(&timer);
2062        usb_kill_urb(&sc->work_urb);
2063
2064        if ((rc = sc->work_urb.status) < 0)
2065                goto err_io;
2066
2067        if (sc->work_urb.actual_length != 1) {
2068                nluns = 0;
2069        } else {
2070                if ((nluns = *p) == 55) {
2071                        nluns = 0;
2072                } else {
2073                        /* GetMaxLUN returns the maximum LUN number; the count is one more */
2074                        nluns += 1;
2075                        if (nluns > UB_MAX_LUNS)
2076                                nluns = UB_MAX_LUNS;
2077                }
2078        }
2079
2080        kfree(p);
2081        return nluns;
2082
2083err_io:
2084err_submit:
2085        kfree(p);
2086err_alloc:
2087        return rc;
2088}
2089
2090/*
2091 * Clear initial stalls.
2092 */
2093static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2094{
2095        int endp;
2096        struct usb_ctrlrequest *cr;
2097        struct completion compl;
2098        struct timer_list timer;
2099        int rc;
2100
2101        init_completion(&compl);
2102
2103        endp = usb_pipeendpoint(stalled_pipe);
2104        if (usb_pipein(stalled_pipe))
2105                endp |= USB_DIR_IN;
2106
2107        cr = &sc->work_cr;
2108        cr->bRequestType = USB_RECIP_ENDPOINT;
2109        cr->bRequest = USB_REQ_CLEAR_FEATURE;
2110        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2111        cr->wIndex = cpu_to_le16(endp);
2112        cr->wLength = cpu_to_le16(0);
2113
2114        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2115            (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2116
2117        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2118                printk(KERN_WARNING
2119                     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2120                return rc;
2121        }
2122
2123        init_timer(&timer);
2124        timer.function = ub_probe_timeout;
2125        timer.data = (unsigned long) &compl;
2126        timer.expires = jiffies + UB_CTRL_TIMEOUT;
2127        add_timer(&timer);
2128
2129        wait_for_completion(&compl);
2130
2131        del_timer_sync(&timer);
2132        usb_kill_urb(&sc->work_urb);
2133
2134        usb_reset_endpoint(sc->dev, endp);
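            /* Resync the host-side endpoint state (data toggle) now that
             * the stall is cleared. */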
2135
2136        return 0;
2137}
2138
2139/*
2140 * Get the pipe settings.
2141 */
2142static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2143    struct usb_interface *intf)
2144{
2145        struct usb_host_interface *altsetting = intf->cur_altsetting;
2146        struct usb_endpoint_descriptor *ep_in = NULL;
2147        struct usb_endpoint_descriptor *ep_out = NULL;
2148        struct usb_endpoint_descriptor *ep;
2149        int i;
2150
2151        /*
2152         * Find the endpoints we need.
2153         * We are expecting a minimum of 2 endpoints - in and out (bulk).
2154         * We will ignore any others.
2155         */
2156        for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2157                ep = &altsetting->endpoint[i].desc;
2158
2159                /* Is it a BULK endpoint? */
2160                if (usb_endpoint_xfer_bulk(ep)) {
2161                        /* BULK in or out? */
2162                        if (usb_endpoint_dir_in(ep)) {
2163                                if (ep_in == NULL)
2164                                        ep_in = ep;
2165                        } else {
2166                                if (ep_out == NULL)
2167                                        ep_out = ep;
2168                        }
2169                }
2170        }
2171
2172        if (ep_in == NULL || ep_out == NULL) {
2173                printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
2174                return -ENODEV;
2175        }
2176
2177        /* Calculate and store the pipe values */
2178        sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2179        sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2180        sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2181                usb_endpoint_num(ep_out));
2182        sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 
2183                usb_endpoint_num(ep_in));
2184
2185        return 0;
2186}
2187
2188/*
2189 * Probing is done in the process context, which allows us to cheat
2190 * and not build a state machine for the discovery.
2191 */
2192static int ub_probe(struct usb_interface *intf,
2193    const struct usb_device_id *dev_id)
2194{
2195        struct ub_dev *sc;
2196        int nluns;
2197        int rc;
2198        int i;
2199
2200        if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2201                return -ENXIO;
2202
2203        rc = -ENOMEM;
2204        if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2205                goto err_core;
2206        sc->lock = ub_next_lock();
2207        INIT_LIST_HEAD(&sc->luns);
2208        usb_init_urb(&sc->work_urb);
2209        tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2210        atomic_set(&sc->poison, 0);
2211        INIT_WORK(&sc->reset_work, ub_reset_task);
2212        init_waitqueue_head(&sc->reset_wait);
2213
2214        init_timer(&sc->work_timer);
2215        sc->work_timer.data = (unsigned long) sc;
2216        sc->work_timer.function = ub_urb_timeout;
2217
2218        ub_init_completion(&sc->work_done);
2219        sc->work_done.done = 1;         /* A little yuk, but oh well... */
2220
2221        sc->dev = interface_to_usbdev(intf);
2222        sc->intf = intf;
2223        // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2224        usb_set_intfdata(intf, sc);
2225        usb_get_dev(sc->dev);
2226        /*
2227         * Since we give the interface struct to the block level through
2228         * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2229         * oopses on close after a disconnect (kernels 2.6.16 and up).
2230         */
2231        usb_get_intf(sc->intf);
2232
2233        snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2234            sc->dev->bus->busnum, sc->dev->devnum);
2235
2236        /* XXX Verify that we can handle the device (from descriptors) */
2237
2238        if (ub_get_pipes(sc, sc->dev, intf) != 0)
2239                goto err_dev_desc;
2240
2241        /*
2242         * At this point, all USB initialization is done, do upper layer.
2243         * We really hate halfway initialized structures, so from the
2244         * invariants perspective, this ub_dev is fully constructed at
2245         * this point.
2246         */
2247
2248        /*
2249         * This is needed to clear toggles. It is a problem only if we do
2250         * `rmmod ub && modprobe ub` without disconnects, but we like that.
2251         */
2252#if 0 /* iPod Mini fails if we do this (big white iPod works) */
2253        ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2254        ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2255#endif
2256
2257        /*
2258         * The way this is used by the startup code is a little specific.
2259         * A SCSI check causes a USB stall. Our common case code sees it
2260         * and clears the check, after which the device is ready for use.
2261         * But if a check was present, any command other than
2262         * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2263         *
2264         * If we neglect to clear the SCSI check, the first real command fails
2265         * (which is the capacity readout). We clear that and retry, but why
2266         * cause spurious retries for no reason?
2267         *
2268         * Revalidation may start with its own TEST_UNIT_READY, but that one
2269         * has to succeed, so we clear checks with an additional one here.
2270         * In any case it's not our business how revalidation is implemented.
2271         */
2272        for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
2273                if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2274                if (rc != 0x6) break;   /* 0x06: UNIT ATTENTION */
2275                msleep(10);
2276        }
2277
2278        nluns = 1;
2279        for (i = 0; i < 3; i++) {
2280                if ((rc = ub_sync_getmaxlun(sc)) < 0)
2281                        break;
2282                if (rc != 0) {
2283                        nluns = rc;
2284                        break;
2285                }
2286                msleep(100);
2287        }
2288
2289        for (i = 0; i < nluns; i++) {
2290                ub_probe_lun(sc, i);
2291        }
2292        return 0;
2293
2294err_dev_desc:
2295        usb_set_intfdata(intf, NULL);
2296        usb_put_intf(sc->intf);
2297        usb_put_dev(sc->dev);
2298        kfree(sc);
2299err_core:
2300        return rc;
2301}
2302
2303static int ub_probe_lun(struct ub_dev *sc, int lnum)
2304{
2305        struct ub_lun *lun;
2306        struct request_queue *q;
2307        struct gendisk *disk;
2308        int rc;
2309
2310        rc = -ENOMEM;
2311        if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2312                goto err_alloc;
2313        lun->num = lnum;
2314
2315        rc = -ENOSR;
2316        if ((lun->id = ub_id_get()) == -1)
2317                goto err_id;
2318
2319        lun->udev = sc;
2320
2321        snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2322            lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2323
2324        lun->removable = 1;             /* XXX Query this from the device */
2325        lun->changed = 1;               /* ub_revalidate clears only */
2326        ub_revalidate(sc, lun);
2327
2328        rc = -ENOMEM;
2329        if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2330                goto err_diskalloc;
2331
2332        sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2333        disk->major = UB_MAJOR;
2334        disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2335        disk->fops = &ub_bd_fops;
2336        disk->private_data = lun;
2337        disk->driverfs_dev = &sc->intf->dev;
2338
2339        rc = -ENOMEM;
2340        if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2341                goto err_blkqinit;
2342
2343        disk->queue = q;
2344
2345        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2346        blk_queue_max_segments(q, UB_MAX_REQ_SG);
2347        blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
2348        blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
2349        blk_queue_logical_block_size(q, lun->capacity.bsize);
2350
2351        lun->disk = disk;
2352        q->queuedata = lun;
2353        list_add(&lun->link, &sc->luns);
2354
2355        set_capacity(disk, lun->capacity.nsec);
2356        if (lun->removable)
2357                disk->flags |= GENHD_FL_REMOVABLE;
2358
2359        add_disk(disk);
2360
2361        return 0;
2362
2363err_blkqinit:
2364        put_disk(disk);
2365err_diskalloc:
2366        ub_id_put(lun->id);
2367err_id:
2368        kfree(lun);
2369err_alloc:
2370        return rc;
2371}
2372
2373static void ub_disconnect(struct usb_interface *intf)
2374{
2375        struct ub_dev *sc = usb_get_intfdata(intf);
2376        struct ub_lun *lun;
2377        unsigned long flags;
2378
2379        /*
2380         * Prevent ub_bd_release from pulling the rug from under us.
2381         * XXX This is starting to look like a kref.
2382         * XXX Why not to take this ref at probe time?
2383         */
2384        spin_lock_irqsave(&ub_lock, flags);
2385        sc->openc++;
2386        spin_unlock_irqrestore(&ub_lock, flags);
2387
2388        /*
2389         * Fence stall clearings, operations triggered by unlinkings and so on.
2390         * We do not attempt to unlink any URBs, because we do not trust the
2391         * unlink paths in HC drivers. Also, we get -84 (likely -EILSEQ) upon disconnect anyway.
2392         */
2393        atomic_set(&sc->poison, 1);
2394
2395        /*
2396         * Wait for reset to end, if any.
2397         */
2398        wait_event(sc->reset_wait, !sc->reset);
2399
2400        /*
2401         * Blow away queued commands.
2402         *
2403         * Actually, this never works, because before we get here
2404         * the HCD terminates outstanding URB(s). It causes our
2405         * SCSI command queue to advance, commands fail to submit,
2406         * and the whole queue drains. So, we just use this code to
2407         * print warnings.
2408         */
2409        spin_lock_irqsave(sc->lock, flags);
2410        {
2411                struct ub_scsi_cmd *cmd;
2412                int cnt = 0;
2413                while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2414                        cmd->error = -ENOTCONN;
2415                        cmd->state = UB_CMDST_DONE;
2416                        ub_cmdq_pop(sc);
2417                        (*cmd->done)(sc, cmd);
2418                        cnt++;
2419                }
2420                if (cnt != 0) {
2421                        printk(KERN_WARNING "%s: "
2422                            "%d commands were queued after shutdown\n", sc->name, cnt);
2423                }
2424        }
2425        spin_unlock_irqrestore(sc->lock, flags);
2426
2427        /*
2428         * Unregister the upper layer.
2429         */
2430        list_for_each_entry(lun, &sc->luns, link) {
2431                del_gendisk(lun->disk);
2432                /*
2433                 * I wish I could do:
2434                 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
2435                 * As it is, we rely on our internal poisoning and let
2436                 * the upper levels spin furiously, failing all the I/O.
2437                 */
2438        }
2439
2440        /*
2441         * Testing for -EINPROGRESS is always a bug, so we are bending
2442         * the rules a little.
2443         */
2444        spin_lock_irqsave(sc->lock, flags);
2445        if (sc->work_urb.status == -EINPROGRESS) {      /* janitors: ignore */
2446                printk(KERN_WARNING "%s: "
2447                    "URB is active after disconnect\n", sc->name);
2448        }
2449        spin_unlock_irqrestore(sc->lock, flags);
2450
2451        /*
2452         * There is virtually no chance that another CPU is running a timeout
2453         * this long after ub_urb_complete should have called del_timer; that
2454         * holds only if the HCD did not forget to deliver a callback on unlink.
2455         */
2456        del_timer_sync(&sc->work_timer);
2457
2458        /*
2459         * At this point there must be no commands coming from anyone
2460         * and no URBs left in transit.
2461         */
2462
2463        ub_put(sc);
2464}
2465
2466static struct usb_driver ub_driver = {
2467        .name =         "ub",
2468        .probe =        ub_probe,
2469        .disconnect =   ub_disconnect,
2470        .id_table =     ub_usb_ids,
2471        .pre_reset =    ub_pre_reset,
2472        .post_reset =   ub_post_reset,
2473};
2474
2475static int __init ub_init(void)
2476{
2477        int rc;
2478        int i;
2479
2480        for (i = 0; i < UB_QLOCK_NUM; i++)
2481                spin_lock_init(&ub_qlockv[i]);
2482
2483        if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2484                goto err_regblkdev;
2485
2486        if ((rc = usb_register(&ub_driver)) != 0)
2487                goto err_register;
2488
2489        usb_usual_set_present(USB_US_TYPE_UB);
2490        return 0;
2491
2492err_register:
2493        unregister_blkdev(UB_MAJOR, DRV_NAME);
2494err_regblkdev:
2495        return rc;
2496}
2497
2498static void __exit ub_exit(void)
2499{
2500        usb_deregister(&ub_driver);
2501
2502        unregister_blkdev(UB_MAJOR, DRV_NAME);
2503        usb_usual_clear_present(USB_US_TYPE_UB);
2504}
2505
2506module_init(ub_init);
2507module_exit(ub_exit);
2508
2509MODULE_LICENSE("GPL");
2510