linux/drivers/char/xillybus/xillyusb.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 2020 Xillybus Ltd, http://xillybus.com
   4 *
   5 * Driver for the XillyUSB FPGA/host framework.
   6 *
   7 * This driver interfaces with a special IP core in an FPGA, setting up
   8 * a pipe between a hardware FIFO in the programmable logic and a device
   9 * file in the host. The number of such pipes and their attributes are
  10 * set up on the logic. This driver detects these automatically and
  11 * creates the device files accordingly.
  12 */
  13
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/list.h>
  17#include <linux/device.h>
  18#include <linux/module.h>
  19#include <asm/byteorder.h>
  20#include <linux/io.h>
  21#include <linux/interrupt.h>
  22#include <linux/sched.h>
  23#include <linux/fs.h>
  24#include <linux/spinlock.h>
  25#include <linux/mutex.h>
  26#include <linux/workqueue.h>
  27#include <linux/crc32.h>
  28#include <linux/poll.h>
  29#include <linux/delay.h>
  30#include <linux/usb.h>
  31
  32#include "xillybus_class.h"
  33
  34MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core");
  35MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
  36MODULE_ALIAS("xillyusb");
  37MODULE_LICENSE("GPL v2");
  38
  39#define XILLY_RX_TIMEOUT                (10 * HZ / 1000)
  40#define XILLY_RESPONSE_TIMEOUT          (500 * HZ / 1000)
  41
  42#define BUF_SIZE_ORDER                  4
  43#define BUFNUM                          8
  44#define LOG2_IDT_FIFO_SIZE              16
  45#define LOG2_INITIAL_FIFO_BUF_SIZE      16
  46
  47#define MSG_EP_NUM                      1
  48#define IN_EP_NUM                       1
  49
  50static const char xillyname[] = "xillyusb";
  51
  52static unsigned int fifo_buf_order;
  53
  54#define USB_VENDOR_ID_XILINX            0x03fd
  55#define USB_VENDOR_ID_ALTERA            0x09fb
  56
  57#define USB_PRODUCT_ID_XILLYUSB         0xebbe
  58
  59static const struct usb_device_id xillyusb_table[] = {
  60        { USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) },
  61        { USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) },
  62        { }
  63};
  64
  65MODULE_DEVICE_TABLE(usb, xillyusb_table);
  66
  67struct xillyusb_dev;
  68
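/*
 * A FIFO that keeps its data in @bufnum buffers of @bufsize bytes each,
 * pointed to by @mem. @writepos/@writebuf and @readpos/@readbuf are the
 * producer's and consumer's positions, and @fill, protected by @lock, is
 * the number of bytes currently stored. @waitq is woken by the FIFO's
 * users as data or room becomes available.
 */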
  69struct xillyfifo {
  70        unsigned int bufsize; /* In bytes, always a power of 2 */
  71        unsigned int bufnum;
  72        unsigned int size; /* Lazy: Equals bufsize * bufnum */
  73        unsigned int buf_order;
  74
  75        int fill; /* Number of bytes in the FIFO */
  76        spinlock_t lock;
  77        wait_queue_head_t waitq;
  78
  79        unsigned int readpos;
  80        unsigned int readbuf;
  81        unsigned int writepos;
  82        unsigned int writebuf;
  83        char **mem;
  84};
  85
  86struct xillyusb_channel;
  87
  88struct xillyusb_endpoint {
  89        struct xillyusb_dev *xdev;
  90
  91        struct mutex ep_mutex; /* serialize operations on endpoint */
  92
  93        struct list_head buffers;
  94        struct list_head filled_buffers;
  95        spinlock_t buffers_lock; /* protect these two lists */
  96
  97        unsigned int order;
  98        unsigned int buffer_size;
  99
 100        unsigned int fill_mask;
 101
 102        int outstanding_urbs;
 103
 104        struct usb_anchor anchor;
 105
 106        struct xillyfifo fifo;
 107
 108        struct work_struct workitem;
 109
 110        bool shutting_down;
 111        bool drained;
 112        bool wake_on_drain;
 113
 114        u8 ep_num;
 115};
 116
 117struct xillyusb_channel {
 118        struct xillyusb_dev *xdev;
 119
 120        struct xillyfifo *in_fifo;
 121        struct xillyusb_endpoint *out_ep;
 122        struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */
 123
 124        struct mutex in_mutex; /* serialize fops on FPGA to host stream */
 125        struct mutex out_mutex; /* serialize fops on host to FPGA stream */
 126        wait_queue_head_t flushq;
 127
 128        int chan_idx;
 129
 130        u32 in_consumed_bytes;
 131        u32 in_current_checkpoint;
 132        u32 out_bytes;
 133
 134        unsigned int in_log2_element_size;
 135        unsigned int out_log2_element_size;
 136        unsigned int in_log2_fifo_size;
 137        unsigned int out_log2_fifo_size;
 138
 139        unsigned int read_data_ok; /* EOF not arrived (yet) */
 140        unsigned int poll_used;
 141        unsigned int flushing;
 142        unsigned int flushed;
 143        unsigned int canceled;
 144
 145        /* Bit fields protected by @lock except for initialization */
 146        unsigned readable:1;
 147        unsigned writable:1;
 148        unsigned open_for_read:1;
 149        unsigned open_for_write:1;
 150        unsigned in_synchronous:1;
 151        unsigned out_synchronous:1;
 152        unsigned in_seekable:1;
 153        unsigned out_seekable:1;
 154};
 155
 156struct xillybuffer {
 157        struct list_head entry;
 158        struct xillyusb_endpoint *ep;
 159        void *buf;
 160        unsigned int len;
 161};
 162
 163struct xillyusb_dev {
 164        struct xillyusb_channel *channels;
 165
 166        struct usb_device       *udev;
 167        struct device           *dev; /* For dev_err() and such */
 168        struct kref             kref;
 169        struct workqueue_struct *workq;
 170
 171        int error;
 172        spinlock_t error_lock; /* protect @error */
 173        struct work_struct wakeup_workitem;
 174
 175        int num_channels;
 176
 177        struct xillyusb_endpoint *msg_ep;
 178        struct xillyusb_endpoint *in_ep;
 179
 180        struct mutex msg_mutex; /* serialize opcode transmission */
 181        int in_bytes_left;
 182        int leftover_chan_num;
 183        unsigned int in_counter;
 184        struct mutex process_in_mutex; /* synchronize wakeup_all() */
 185};
 186
 187/* FPGA to host opcodes */
 188enum {
 189        OPCODE_DATA = 0,
 190        OPCODE_QUIESCE_ACK = 1,
 191        OPCODE_EOF = 2,
 192        OPCODE_REACHED_CHECKPOINT = 3,
 193        OPCODE_CANCELED_CHECKPOINT = 4,
 194};
 195
 196/* Host to FPGA opcodes */
 197enum {
 198        OPCODE_QUIESCE = 0,
 199        OPCODE_REQ_IDT = 1,
 200        OPCODE_SET_CHECKPOINT = 2,
 201        OPCODE_CLOSE = 3,
 202        OPCODE_SET_PUSH = 4,
 203        OPCODE_UPDATE_PUSH = 5,
 204        OPCODE_CANCEL_CHECKPOINT = 6,
 205        OPCODE_SET_ADDR = 7,
 206};
 207
 208/*
  209 * fifo_write() and fifo_read() are NOT reentrant: concurrent multiple
  210 * calls to the same function on the same FIFO are not allowed. However,
  211 * it's fine for one thread to call fifo_write() while another calls
  212 * fifo_read() on the same FIFO at the same time.
 213 */
 214
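/*
 * Both functions return the number of bytes actually transferred, which
 * may be less than @len (zero included) if the FIFO is full (fifo_write)
 * or empty (fifo_read), or @copier's negative error code if the copy
 * itself failed.
 */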
 215static int fifo_write(struct xillyfifo *fifo,
 216                      const void *data, unsigned int len,
 217                      int (*copier)(void *, const void *, int))
 218{
 219        unsigned int done = 0;
 220        unsigned int todo = len;
 221        unsigned int nmax;
 222        unsigned int writepos = fifo->writepos;
 223        unsigned int writebuf = fifo->writebuf;
 224        unsigned long flags;
 225        int rc;
 226
 227        nmax = fifo->size - READ_ONCE(fifo->fill);
 228
 229        while (1) {
 230                unsigned int nrail = fifo->bufsize - writepos;
 231                unsigned int n = min(todo, nmax);
 232
 233                if (n == 0) {
 234                        spin_lock_irqsave(&fifo->lock, flags);
 235                        fifo->fill += done;
 236                        spin_unlock_irqrestore(&fifo->lock, flags);
 237
 238                        fifo->writepos = writepos;
 239                        fifo->writebuf = writebuf;
 240
 241                        return done;
 242                }
 243
 244                if (n > nrail)
 245                        n = nrail;
 246
 247                rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n);
 248
 249                if (rc)
 250                        return rc;
 251
 252                done += n;
 253                todo -= n;
 254
 255                writepos += n;
 256                nmax -= n;
 257
 258                if (writepos == fifo->bufsize) {
 259                        writepos = 0;
 260                        writebuf++;
 261
 262                        if (writebuf == fifo->bufnum)
 263                                writebuf = 0;
 264                }
 265        }
 266}
 267
 268static int fifo_read(struct xillyfifo *fifo,
 269                     void *data, unsigned int len,
 270                     int (*copier)(void *, const void *, int))
 271{
 272        unsigned int done = 0;
 273        unsigned int todo = len;
 274        unsigned int fill;
 275        unsigned int readpos = fifo->readpos;
 276        unsigned int readbuf = fifo->readbuf;
 277        unsigned long flags;
 278        int rc;
 279
 280        /*
 281         * The spinlock here is necessary, because otherwise fifo->fill
 282         * could have been increased by fifo_write() after writing data
  283         * to the buffer, but this data would potentially not yet be
  284         * visible to this thread when the updated fifo->fill is read.
 285         * That could lead to reading invalid data.
 286         */
 287
 288        spin_lock_irqsave(&fifo->lock, flags);
 289        fill = fifo->fill;
 290        spin_unlock_irqrestore(&fifo->lock, flags);
 291
 292        while (1) {
 293                unsigned int nrail = fifo->bufsize - readpos;
 294                unsigned int n = min(todo, fill);
 295
 296                if (n == 0) {
 297                        spin_lock_irqsave(&fifo->lock, flags);
 298                        fifo->fill -= done;
 299                        spin_unlock_irqrestore(&fifo->lock, flags);
 300
 301                        fifo->readpos = readpos;
 302                        fifo->readbuf = readbuf;
 303
 304                        return done;
 305                }
 306
 307                if (n > nrail)
 308                        n = nrail;
 309
 310                rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n);
 311
 312                if (rc)
 313                        return rc;
 314
 315                done += n;
 316                todo -= n;
 317
 318                readpos += n;
 319                fill -= n;
 320
 321                if (readpos == fifo->bufsize) {
 322                        readpos = 0;
 323                        readbuf++;
 324
 325                        if (readbuf == fifo->bufnum)
 326                                readbuf = 0;
 327                }
 328        }
 329}
 330
 331/*
 332 * These three wrapper functions are used as the @copier argument to
 333 * fifo_write() and fifo_read(), so that they can work directly with
 334 * user memory as well.
 335 */
 336
 337static int xilly_copy_from_user(void *dst, const void *src, int n)
 338{
 339        if (copy_from_user(dst, (const void __user *)src, n))
 340                return -EFAULT;
 341
 342        return 0;
 343}
 344
 345static int xilly_copy_to_user(void *dst, const void *src, int n)
 346{
 347        if (copy_to_user((void __user *)dst, src, n))
 348                return -EFAULT;
 349
 350        return 0;
 351}
 352
 353static int xilly_memcpy(void *dst, const void *src, int n)
 354{
 355        memcpy(dst, src, n);
 356
 357        return 0;
 358}
 359
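/*
 * fifo_init() allocates memory for a FIFO of 2^@log2_size bytes. The
 * global @fifo_buf_order sets the allocation order of each buffer; if a
 * high-order allocation fails, it's reduced and the allocation is retried
 * with more, smaller buffers, so subsequent FIFOs start from the order
 * that last succeeded.
 */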
 360static int fifo_init(struct xillyfifo *fifo,
 361                     unsigned int log2_size)
 362{
 363        unsigned int log2_bufnum;
 364        unsigned int buf_order;
 365        int i;
 366
 367        unsigned int log2_fifo_buf_size;
 368
 369retry:
 370        log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT;
 371
 372        if (log2_size > log2_fifo_buf_size) {
 373                log2_bufnum = log2_size - log2_fifo_buf_size;
 374                buf_order = fifo_buf_order;
 375                fifo->bufsize = 1 << log2_fifo_buf_size;
 376        } else {
 377                log2_bufnum = 0;
 378                buf_order = (log2_size > PAGE_SHIFT) ?
 379                        log2_size - PAGE_SHIFT : 0;
 380                fifo->bufsize = 1 << log2_size;
 381        }
 382
 383        fifo->bufnum = 1 << log2_bufnum;
 384        fifo->size = fifo->bufnum * fifo->bufsize;
 385        fifo->buf_order = buf_order;
 386
 387        fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL);
 388
 389        if (!fifo->mem)
 390                return -ENOMEM;
 391
 392        for (i = 0; i < fifo->bufnum; i++) {
 393                fifo->mem[i] = (void *)
 394                        __get_free_pages(GFP_KERNEL, buf_order);
 395
 396                if (!fifo->mem[i])
 397                        goto memfail;
 398        }
 399
 400        fifo->fill = 0;
 401        fifo->readpos = 0;
 402        fifo->readbuf = 0;
 403        fifo->writepos = 0;
 404        fifo->writebuf = 0;
 405        spin_lock_init(&fifo->lock);
 406        init_waitqueue_head(&fifo->waitq);
 407        return 0;
 408
 409memfail:
 410        for (i--; i >= 0; i--)
 411                free_pages((unsigned long)fifo->mem[i], buf_order);
 412
 413        kfree(fifo->mem);
 414        fifo->mem = NULL;
 415
 416        if (fifo_buf_order) {
 417                fifo_buf_order--;
 418                goto retry;
 419        } else {
 420                return -ENOMEM;
 421        }
 422}
 423
 424static void fifo_mem_release(struct xillyfifo *fifo)
 425{
 426        int i;
 427
 428        if (!fifo->mem)
 429                return;
 430
 431        for (i = 0; i < fifo->bufnum; i++)
 432                free_pages((unsigned long)fifo->mem[i], fifo->buf_order);
 433
 434        kfree(fifo->mem);
 435}
 436
 437/*
 438 * When endpoint_quiesce() returns, the endpoint has no URBs submitted,
  439 * won't accept any new URB submissions, and its related work item is
  440 * neither running nor going to run again.
 441 */
 442
 443static void endpoint_quiesce(struct xillyusb_endpoint *ep)
 444{
 445        mutex_lock(&ep->ep_mutex);
 446        ep->shutting_down = true;
 447        mutex_unlock(&ep->ep_mutex);
 448
 449        usb_kill_anchored_urbs(&ep->anchor);
 450        cancel_work_sync(&ep->workitem);
 451}
 452
 453/*
 454 * Note that endpoint_dealloc() also frees fifo memory (if allocated), even
 455 * though endpoint_alloc doesn't allocate that memory.
 456 */
 457
 458static void endpoint_dealloc(struct xillyusb_endpoint *ep)
 459{
 460        struct list_head *this, *next;
 461
 462        fifo_mem_release(&ep->fifo);
 463
 464        /* Join @filled_buffers with @buffers to free these entries too */
 465        list_splice(&ep->filled_buffers, &ep->buffers);
 466
 467        list_for_each_safe(this, next, &ep->buffers) {
 468                struct xillybuffer *xb =
 469                        list_entry(this, struct xillybuffer, entry);
 470
 471                free_pages((unsigned long)xb->buf, ep->order);
 472                kfree(xb);
 473        }
 474
 475        kfree(ep);
 476}
 477
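/*
 * endpoint_alloc() sets up an endpoint with @bufnum buffers of
 * 2^(PAGE_SHIFT + @order) bytes each on the @buffers list, and @work as
 * its work item. The FIFO's memory is allocated separately with
 * fifo_init(), which is why endpoint_dealloc() tolerates a FIFO that was
 * never set up.
 */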
 478static struct xillyusb_endpoint
 479*endpoint_alloc(struct xillyusb_dev *xdev,
 480                u8 ep_num,
 481                void (*work)(struct work_struct *),
 482                unsigned int order,
 483                int bufnum)
 484{
 485        int i;
 486
 487        struct xillyusb_endpoint *ep;
 488
 489        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 490
 491        if (!ep)
 492                return NULL;
 493
 494        INIT_LIST_HEAD(&ep->buffers);
 495        INIT_LIST_HEAD(&ep->filled_buffers);
 496
 497        spin_lock_init(&ep->buffers_lock);
 498        mutex_init(&ep->ep_mutex);
 499
 500        init_usb_anchor(&ep->anchor);
 501        INIT_WORK(&ep->workitem, work);
 502
 503        ep->order = order;
 504        ep->buffer_size =  1 << (PAGE_SHIFT + order);
 505        ep->outstanding_urbs = 0;
 506        ep->drained = true;
 507        ep->wake_on_drain = false;
 508        ep->xdev = xdev;
 509        ep->ep_num = ep_num;
 510        ep->shutting_down = false;
 511
 512        for (i = 0; i < bufnum; i++) {
 513                struct xillybuffer *xb;
 514                unsigned long addr;
 515
 516                xb = kzalloc(sizeof(*xb), GFP_KERNEL);
 517
 518                if (!xb) {
 519                        endpoint_dealloc(ep);
 520                        return NULL;
 521                }
 522
 523                addr = __get_free_pages(GFP_KERNEL, order);
 524
 525                if (!addr) {
 526                        kfree(xb);
 527                        endpoint_dealloc(ep);
 528                        return NULL;
 529                }
 530
 531                xb->buf = (void *)addr;
 532                xb->ep = ep;
 533                list_add_tail(&xb->entry, &ep->buffers);
 534        }
 535        return ep;
 536}
 537
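/*
 * cleanup_dev() is the kref release callback. It frees the device-wide
 * resources (the two fixed endpoints, the workqueue and the channel
 * array) only if they have been set up, so it's safe to call on a
 * partially initialized @xdev.
 */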
 538static void cleanup_dev(struct kref *kref)
 539{
 540        struct xillyusb_dev *xdev =
 541                container_of(kref, struct xillyusb_dev, kref);
 542
 543        if (xdev->in_ep)
 544                endpoint_dealloc(xdev->in_ep);
 545
 546        if (xdev->msg_ep)
 547                endpoint_dealloc(xdev->msg_ep);
 548
 549        if (xdev->workq)
 550                destroy_workqueue(xdev->workq);
 551
 552        kfree(xdev->channels); /* Argument may be NULL, and that's fine */
 553        kfree(xdev);
 554}
 555
 556/*
 557 * @process_in_mutex is taken to ensure that bulk_in_work() won't call
 558 * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all
 559 * @read_data_ok entries, which will make process_bulk_in() report false
  560 * errors if executed. The mechanism relies on the fact that xdev->error is
  561 * assigned a non-zero value by report_io_error() prior to queueing
  562 * wakeup_all(), which prevents bulk_in_work() from calling process_bulk_in().
 563 *
 564 * The fact that wakeup_all() and bulk_in_work() are queued on the same
  565 * workqueue makes their concurrent execution very unlikely; however, the
 566 * kernel's API doesn't seem to ensure this strictly.
 567 */
 568
 569static void wakeup_all(struct work_struct *work)
 570{
 571        int i;
 572        struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
 573                                                 wakeup_workitem);
 574
 575        mutex_lock(&xdev->process_in_mutex);
 576
 577        for (i = 0; i < xdev->num_channels; i++) {
 578                struct xillyusb_channel *chan = &xdev->channels[i];
 579
 580                mutex_lock(&chan->lock);
 581
 582                if (chan->in_fifo) {
 583                        /*
 584                         * Fake an EOF: Even if such arrives, it won't be
 585                         * processed.
 586                         */
 587                        chan->read_data_ok = 0;
 588                        wake_up_interruptible(&chan->in_fifo->waitq);
 589                }
 590
 591                if (chan->out_ep)
 592                        wake_up_interruptible(&chan->out_ep->fifo.waitq);
 593
 594                mutex_unlock(&chan->lock);
 595
 596                wake_up_interruptible(&chan->flushq);
 597        }
 598
 599        mutex_unlock(&xdev->process_in_mutex);
 600
 601        wake_up_interruptible(&xdev->msg_ep->fifo.waitq);
 602
 603        kref_put(&xdev->kref, cleanup_dev);
 604}
 605
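/*
 * report_io_error() records the first error reported on the device and,
 * for that first error only, takes a reference on @xdev and queues
 * wakeup_all() to wake up anything that might be waiting. The reference
 * is dropped by wakeup_all() itself.
 */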
 606static void report_io_error(struct xillyusb_dev *xdev,
 607                            int errcode)
 608{
 609        unsigned long flags;
 610        bool do_once = false;
 611
 612        spin_lock_irqsave(&xdev->error_lock, flags);
 613        if (!xdev->error) {
 614                xdev->error = errcode;
 615                do_once = true;
 616        }
 617        spin_unlock_irqrestore(&xdev->error_lock, flags);
 618
 619        if (do_once) {
 620                kref_get(&xdev->kref); /* xdev is used by work item */
 621                queue_work(xdev->workq, &xdev->wakeup_workitem);
 622        }
 623}
 624
 625/*
 626 * safely_assign_in_fifo() changes the value of chan->in_fifo and ensures
 627 * the previous pointer is never used after its return.
 628 */
 629
 630static void safely_assign_in_fifo(struct xillyusb_channel *chan,
 631                                  struct xillyfifo *fifo)
 632{
 633        mutex_lock(&chan->lock);
 634        chan->in_fifo = fifo;
 635        mutex_unlock(&chan->lock);
 636
 637        flush_work(&chan->xdev->in_ep->workitem);
 638}
 639
 640static void bulk_in_completer(struct urb *urb)
 641{
 642        struct xillybuffer *xb = urb->context;
 643        struct xillyusb_endpoint *ep = xb->ep;
 644        unsigned long flags;
 645
 646        if (urb->status) {
 647                if (!(urb->status == -ENOENT ||
 648                      urb->status == -ECONNRESET ||
 649                      urb->status == -ESHUTDOWN))
 650                        report_io_error(ep->xdev, -EIO);
 651
 652                spin_lock_irqsave(&ep->buffers_lock, flags);
 653                list_add_tail(&xb->entry, &ep->buffers);
 654                ep->outstanding_urbs--;
 655                spin_unlock_irqrestore(&ep->buffers_lock, flags);
 656
 657                return;
 658        }
 659
 660        xb->len = urb->actual_length;
 661
 662        spin_lock_irqsave(&ep->buffers_lock, flags);
 663        list_add_tail(&xb->entry, &ep->filled_buffers);
 664        spin_unlock_irqrestore(&ep->buffers_lock, flags);
 665
 666        if (!ep->shutting_down)
 667                queue_work(ep->xdev->workq, &ep->workitem);
 668}
 669
 670static void bulk_out_completer(struct urb *urb)
 671{
 672        struct xillybuffer *xb = urb->context;
 673        struct xillyusb_endpoint *ep = xb->ep;
 674        unsigned long flags;
 675
 676        if (urb->status &&
 677            (!(urb->status == -ENOENT ||
 678               urb->status == -ECONNRESET ||
 679               urb->status == -ESHUTDOWN)))
 680                report_io_error(ep->xdev, -EIO);
 681
 682        spin_lock_irqsave(&ep->buffers_lock, flags);
 683        list_add_tail(&xb->entry, &ep->buffers);
 684        ep->outstanding_urbs--;
 685        spin_unlock_irqrestore(&ep->buffers_lock, flags);
 686
 687        if (!ep->shutting_down)
 688                queue_work(ep->xdev->workq, &ep->workitem);
 689}
 690
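/*
 * try_queue_bulk_in() submits BULK IN URBs on all currently free buffers
 * of @ep, so that the FPGA-to-host stream always has receive buffers
 * pending, unless the endpoint is shutting down or a device error has
 * been reported.
 */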
 691static void try_queue_bulk_in(struct xillyusb_endpoint *ep)
 692{
 693        struct xillyusb_dev *xdev = ep->xdev;
 694        struct xillybuffer *xb;
 695        struct urb *urb;
 696
 697        int rc;
 698        unsigned long flags;
 699        unsigned int bufsize = ep->buffer_size;
 700
 701        mutex_lock(&ep->ep_mutex);
 702
 703        if (ep->shutting_down || xdev->error)
 704                goto done;
 705
 706        while (1) {
 707                spin_lock_irqsave(&ep->buffers_lock, flags);
 708
 709                if (list_empty(&ep->buffers)) {
 710                        spin_unlock_irqrestore(&ep->buffers_lock, flags);
 711                        goto done;
 712                }
 713
 714                xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
 715                list_del(&xb->entry);
 716                ep->outstanding_urbs++;
 717
 718                spin_unlock_irqrestore(&ep->buffers_lock, flags);
 719
 720                urb = usb_alloc_urb(0, GFP_KERNEL);
 721                if (!urb) {
 722                        report_io_error(xdev, -ENOMEM);
 723                        goto relist;
 724                }
 725
 726                usb_fill_bulk_urb(urb, xdev->udev,
 727                                  usb_rcvbulkpipe(xdev->udev, ep->ep_num),
 728                                  xb->buf, bufsize, bulk_in_completer, xb);
 729
 730                usb_anchor_urb(urb, &ep->anchor);
 731
 732                rc = usb_submit_urb(urb, GFP_KERNEL);
 733
 734                if (rc) {
 735                        report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
 736                                        -EIO);
 737                        goto unanchor;
 738                }
 739
 740                usb_free_urb(urb); /* This just decrements reference count */
 741        }
 742
 743unanchor:
 744        usb_unanchor_urb(urb);
 745        usb_free_urb(urb);
 746
 747relist:
 748        spin_lock_irqsave(&ep->buffers_lock, flags);
 749        list_add_tail(&xb->entry, &ep->buffers);
 750        ep->outstanding_urbs--;
 751        spin_unlock_irqrestore(&ep->buffers_lock, flags);
 752
 753done:
 754        mutex_unlock(&ep->ep_mutex);
 755}
 756
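/*
 * try_queue_bulk_out() moves data from @ep's FIFO into free buffers and
 * submits them as BULK OUT URBs. A less-than-full buffer is submitted
 * only when no other URB is outstanding, presumably to keep transfers
 * large. The FIFO's waitq is woken if room was freed, or if the endpoint
 * was found drained while @wake_on_drain is set.
 */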
 757static void try_queue_bulk_out(struct xillyusb_endpoint *ep)
 758{
 759        struct xillyfifo *fifo = &ep->fifo;
 760        struct xillyusb_dev *xdev = ep->xdev;
 761        struct xillybuffer *xb;
 762        struct urb *urb;
 763
 764        int rc;
 765        unsigned int fill;
 766        unsigned long flags;
 767        bool do_wake = false;
 768
 769        mutex_lock(&ep->ep_mutex);
 770
 771        if (ep->shutting_down || xdev->error)
 772                goto done;
 773
 774        fill = READ_ONCE(fifo->fill) & ep->fill_mask;
 775
 776        while (1) {
 777                int count;
 778                unsigned int max_read;
 779
 780                spin_lock_irqsave(&ep->buffers_lock, flags);
 781
 782                /*
 783                 * Race conditions might have the FIFO filled while the
 784                 * endpoint is marked as drained here. That doesn't matter,
 785                 * because the sole purpose of @drained is to ensure that
 786                 * certain data has been sent on the USB channel before
 787                 * shutting it down. Hence knowing that the FIFO appears
 788                 * to be empty with no outstanding URBs at some moment
 789                 * is good enough.
 790                 */
 791
 792                if (!fill) {
 793                        ep->drained = !ep->outstanding_urbs;
 794                        if (ep->drained && ep->wake_on_drain)
 795                                do_wake = true;
 796
 797                        spin_unlock_irqrestore(&ep->buffers_lock, flags);
 798                        goto done;
 799                }
 800
 801                ep->drained = false;
 802
 803                if ((fill < ep->buffer_size && ep->outstanding_urbs) ||
 804                    list_empty(&ep->buffers)) {
 805                        spin_unlock_irqrestore(&ep->buffers_lock, flags);
 806                        goto done;
 807                }
 808
 809                xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
 810                list_del(&xb->entry);
 811                ep->outstanding_urbs++;
 812
 813                spin_unlock_irqrestore(&ep->buffers_lock, flags);
 814
 815                max_read = min(fill, ep->buffer_size);
 816
 817                count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy);
 818
 819                /*
 820                 * xilly_memcpy always returns 0 => fifo_read can't fail =>
 821                 * count > 0
 822                 */
 823
 824                urb = usb_alloc_urb(0, GFP_KERNEL);
 825                if (!urb) {
 826                        report_io_error(xdev, -ENOMEM);
 827                        goto relist;
 828                }
 829
 830                usb_fill_bulk_urb(urb, xdev->udev,
 831                                  usb_sndbulkpipe(xdev->udev, ep->ep_num),
 832                                  xb->buf, count, bulk_out_completer, xb);
 833
 834                usb_anchor_urb(urb, &ep->anchor);
 835
 836                rc = usb_submit_urb(urb, GFP_KERNEL);
 837
 838                if (rc) {
 839                        report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
 840                                        -EIO);
 841                        goto unanchor;
 842                }
 843
 844                usb_free_urb(urb); /* This just decrements reference count */
 845
 846                fill -= count;
 847                do_wake = true;
 848        }
 849
 850unanchor:
 851        usb_unanchor_urb(urb);
 852        usb_free_urb(urb);
 853
 854relist:
 855        spin_lock_irqsave(&ep->buffers_lock, flags);
 856        list_add_tail(&xb->entry, &ep->buffers);
 857        ep->outstanding_urbs--;
 858        spin_unlock_irqrestore(&ep->buffers_lock, flags);
 859
 860done:
 861        mutex_unlock(&ep->ep_mutex);
 862
 863        if (do_wake)
 864                wake_up_interruptible(&fifo->waitq);
 865}
 866
 867static void bulk_out_work(struct work_struct *work)
 868{
 869        struct xillyusb_endpoint *ep = container_of(work,
 870                                                    struct xillyusb_endpoint,
 871                                                    workitem);
 872        try_queue_bulk_out(ep);
 873}
 874
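/*
 * process_in_opcode() handles the non-data opcodes arriving from the
 * FPGA: EOF on a stream, and the acknowledgments of checkpoint requests
 * (reached or canceled), waking up whoever waits for them.
 */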
 875static int process_in_opcode(struct xillyusb_dev *xdev,
 876                             int opcode,
 877                             int chan_num)
 878{
 879        struct xillyusb_channel *chan;
 880        struct device *dev = xdev->dev;
 881        int chan_idx = chan_num >> 1;
 882
 883        if (chan_idx >= xdev->num_channels) {
 884                dev_err(dev, "Received illegal channel ID %d from FPGA\n",
 885                        chan_num);
 886                return -EIO;
 887        }
 888
 889        chan = &xdev->channels[chan_idx];
 890
 891        switch (opcode) {
 892        case OPCODE_EOF:
 893                if (!chan->read_data_ok) {
 894                        dev_err(dev, "Received unexpected EOF for channel %d\n",
 895                                chan_num);
 896                        return -EIO;
 897                }
 898
 899                /*
 900                 * A write memory barrier ensures that the FIFO's fill level
 901                 * is visible before read_data_ok turns zero, so the data in
 902                 * the FIFO isn't missed by the consumer.
 903                 */
 904                smp_wmb();
 905                WRITE_ONCE(chan->read_data_ok, 0);
 906                wake_up_interruptible(&chan->in_fifo->waitq);
 907                break;
 908
 909        case OPCODE_REACHED_CHECKPOINT:
 910                chan->flushing = 0;
 911                wake_up_interruptible(&chan->flushq);
 912                break;
 913
 914        case OPCODE_CANCELED_CHECKPOINT:
 915                chan->canceled = 1;
 916                wake_up_interruptible(&chan->flushq);
 917                break;
 918
 919        default:
 920                dev_err(dev, "Received illegal opcode %d from FPGA\n",
 921                        opcode);
 922                return -EIO;
 923        }
 924
 925        return 0;
 926}
 927
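/*
 * process_bulk_in() parses a BULK IN transfer, which is a sequence of
 * 32-bit little-endian words: a control word carries the channel number
 * in bits 11:0, a count in bits 21:12 and the opcode in bits 27:24. For
 * OPCODE_DATA, count + 1 payload bytes follow, padded to a 32-bit
 * boundary; for other opcodes, count is a rolling counter checked against
 * @in_counter. A data chunk may span transfers, which @in_bytes_left and
 * @leftover_chan_num keep track of.
 */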
 928static int process_bulk_in(struct xillybuffer *xb)
 929{
 930        struct xillyusb_endpoint *ep = xb->ep;
 931        struct xillyusb_dev *xdev = ep->xdev;
 932        struct device *dev = xdev->dev;
 933        int dws = xb->len >> 2;
 934        __le32 *p = xb->buf;
 935        u32 ctrlword;
 936        struct xillyusb_channel *chan;
 937        struct xillyfifo *fifo;
 938        int chan_num = 0, opcode;
 939        int chan_idx;
 940        int bytes, count, dwconsume;
 941        int in_bytes_left = 0;
 942        int rc;
 943
 944        if ((dws << 2) != xb->len) {
 945                dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n",
 946                        xb->len);
 947                return -EIO;
 948        }
 949
 950        if (xdev->in_bytes_left) {
 951                bytes = min(xdev->in_bytes_left, dws << 2);
 952                in_bytes_left = xdev->in_bytes_left - bytes;
 953                chan_num = xdev->leftover_chan_num;
 954                goto resume_leftovers;
 955        }
 956
 957        while (dws) {
 958                ctrlword = le32_to_cpu(*p++);
 959                dws--;
 960
 961                chan_num = ctrlword & 0xfff;
 962                count = (ctrlword >> 12) & 0x3ff;
 963                opcode = (ctrlword >> 24) & 0xf;
 964
 965                if (opcode != OPCODE_DATA) {
 966                        unsigned int in_counter = xdev->in_counter++ & 0x3ff;
 967
 968                        if (count != in_counter) {
 969                                dev_err(dev, "Expected opcode counter %d, got %d\n",
 970                                        in_counter, count);
 971                                return -EIO;
 972                        }
 973
 974                        rc = process_in_opcode(xdev, opcode, chan_num);
 975
 976                        if (rc)
 977                                return rc;
 978
 979                        continue;
 980                }
 981
 982                bytes = min(count + 1, dws << 2);
 983                in_bytes_left = count + 1 - bytes;
 984
 985resume_leftovers:
 986                chan_idx = chan_num >> 1;
 987
 988                if (!(chan_num & 1) || chan_idx >= xdev->num_channels ||
 989                    !xdev->channels[chan_idx].read_data_ok) {
 990                        dev_err(dev, "Received illegal channel ID %d from FPGA\n",
 991                                chan_num);
 992                        return -EIO;
 993                }
 994                chan = &xdev->channels[chan_idx];
 995
 996                fifo = chan->in_fifo;
 997
 998                if (unlikely(!fifo))
 999                        return -EIO; /* We got really unexpected data */
1000
1001                if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) {
1002                        dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n");
1003                        return -EIO;
1004                }
1005
1006                wake_up_interruptible(&fifo->waitq);
1007
1008                dwconsume = (bytes + 3) >> 2;
1009                dws -= dwconsume;
1010                p += dwconsume;
1011        }
1012
1013        xdev->in_bytes_left = in_bytes_left;
1014        xdev->leftover_chan_num = chan_num;
1015        return 0;
1016}
1017
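/*
 * bulk_in_work() consumes the buffers that bulk_in_completer() has put on
 * @filled_buffers, returns them to the free list and, if no error
 * occurred, resubmits URBs on the endpoint by calling try_queue_bulk_in().
 * Errors from process_bulk_in() are reported after the locks are dropped.
 */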
1018static void bulk_in_work(struct work_struct *work)
1019{
1020        struct xillyusb_endpoint *ep =
1021                container_of(work, struct xillyusb_endpoint, workitem);
1022        struct xillyusb_dev *xdev = ep->xdev;
1023        unsigned long flags;
1024        struct xillybuffer *xb;
1025        bool consumed = false;
1026        int rc = 0;
1027
1028        mutex_lock(&xdev->process_in_mutex);
1029
1030        spin_lock_irqsave(&ep->buffers_lock, flags);
1031
1032        while (1) {
1033                if (rc || list_empty(&ep->filled_buffers)) {
1034                        spin_unlock_irqrestore(&ep->buffers_lock, flags);
1035                        mutex_unlock(&xdev->process_in_mutex);
1036
1037                        if (rc)
1038                                report_io_error(xdev, rc);
1039                        else if (consumed)
1040                                try_queue_bulk_in(ep);
1041
1042                        return;
1043                }
1044
1045                xb = list_first_entry(&ep->filled_buffers, struct xillybuffer,
1046                                      entry);
1047                list_del(&xb->entry);
1048
1049                spin_unlock_irqrestore(&ep->buffers_lock, flags);
1050
1051                consumed = true;
1052
1053                if (!xdev->error)
1054                        rc = process_bulk_in(xb);
1055
1056                spin_lock_irqsave(&ep->buffers_lock, flags);
1057                list_add_tail(&xb->entry, &ep->buffers);
1058                ep->outstanding_urbs--;
1059        }
1060}
1061
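/*
 * xillyusb_send_opcode() queues an 8-byte message on the MSG endpoint: a
 * little-endian word with the channel number in bits 11:0 and the opcode
 * in bits 27:24, followed by a 32-bit data word. It sleeps (ignoring
 * signals) until the MSG FIFO has room for the message.
 */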
1062static int xillyusb_send_opcode(struct xillyusb_dev *xdev,
1063                                int chan_num, char opcode, u32 data)
1064{
1065        struct xillyusb_endpoint *ep = xdev->msg_ep;
1066        struct xillyfifo *fifo = &ep->fifo;
1067        __le32 msg[2];
1068
1069        int rc = 0;
1070
1071        msg[0] = cpu_to_le32((chan_num & 0xfff) |
1072                             ((opcode & 0xf) << 24));
1073        msg[1] = cpu_to_le32(data);
1074
1075        mutex_lock(&xdev->msg_mutex);
1076
1077        /*
1078         * The wait queue is woken with the interruptible variant, so the
 1079         * wait function matches; however, returning because of an interrupt
1080         * will mess things up considerably, in particular when the caller is
1081         * the release method. And the xdev->error part prevents being stuck
1082         * forever in the event of a bizarre hardware bug: Pull the USB plug.
1083         */
1084
1085        while (wait_event_interruptible(fifo->waitq,
1086                                        fifo->fill <= (fifo->size - 8) ||
1087                                        xdev->error))
1088                ; /* Empty loop */
1089
1090        if (xdev->error) {
1091                rc = xdev->error;
1092                goto unlock_done;
1093        }
1094
1095        fifo_write(fifo, (void *)msg, 8, xilly_memcpy);
1096
1097        try_queue_bulk_out(ep);
1098
1099unlock_done:
1100        mutex_unlock(&xdev->msg_mutex);
1101
1102        return rc;
1103}
1104
1105/*
 1106 * Note that flush_downstream() merely waits for the data to arrive at
 1107 * the application logic at the FPGA -- unlike PCIe Xillybus' counterpart,
 1108 * it does nothing to make that happen (nor is that necessary).
1109 *
1110 * This function is not reentrant for the same @chan, but this is covered
 1111 * by the fact that for any given @chan, it's called only by the open,
1112 * write, llseek and flush fops methods, which can't run in parallel (and the
1113 * write + flush and llseek method handlers are protected with out_mutex).
1114 *
1115 * chan->flushed is there to avoid multiple flushes at the same position,
1116 * in particular as a result of programs that close the file descriptor
1117 * e.g. after a dup2() for redirection.
1118 */
1119
1120static int flush_downstream(struct xillyusb_channel *chan,
1121                            long timeout,
1122                            bool interruptible)
1123{
1124        struct xillyusb_dev *xdev = chan->xdev;
1125        int chan_num = chan->chan_idx << 1;
1126        long deadline, left_to_sleep;
1127        int rc;
1128
1129        if (chan->flushed)
1130                return 0;
1131
1132        deadline = jiffies + 1 + timeout;
1133
1134        if (chan->flushing) {
1135                long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT;
1136
1137                chan->canceled = 0;
1138                rc = xillyusb_send_opcode(xdev, chan_num,
1139                                          OPCODE_CANCEL_CHECKPOINT, 0);
1140
1141                if (rc)
1142                        return rc; /* Only real error, never -EINTR */
1143
1144                /* Ignoring interrupts. Cancellation must be handled */
1145                while (!chan->canceled) {
1146                        left_to_sleep = cancel_deadline - ((long)jiffies);
1147
1148                        if (left_to_sleep <= 0) {
1149                                report_io_error(xdev, -EIO);
1150                                return -EIO;
1151                        }
1152
1153                        rc = wait_event_interruptible_timeout(chan->flushq,
1154                                                              chan->canceled ||
1155                                                              xdev->error,
1156                                                              left_to_sleep);
1157
1158                        if (xdev->error)
1159                                return xdev->error;
1160                }
1161        }
1162
1163        chan->flushing = 1;
1164
1165        /*
1166         * The checkpoint is given in terms of data elements, not bytes. As
1167         * a result, if less than an element's worth of data is stored in the
1168         * FIFO, it's not flushed, including the flush before closing, which
1169         * means that such data is lost. This is consistent with PCIe Xillybus.
1170         */
1171
1172        rc = xillyusb_send_opcode(xdev, chan_num,
1173                                  OPCODE_SET_CHECKPOINT,
1174                                  chan->out_bytes >>
1175                                  chan->out_log2_element_size);
1176
1177        if (rc)
1178                return rc; /* Only real error, never -EINTR */
1179
1180        if (!timeout) {
1181                while (chan->flushing) {
1182                        rc = wait_event_interruptible(chan->flushq,
1183                                                      !chan->flushing ||
1184                                                      xdev->error);
1185                        if (xdev->error)
1186                                return xdev->error;
1187
1188                        if (interruptible && rc)
1189                                return -EINTR;
1190                }
1191
1192                goto done;
1193        }
1194
1195        while (chan->flushing) {
1196                left_to_sleep = deadline - ((long)jiffies);
1197
1198                if (left_to_sleep <= 0)
1199                        return -ETIMEDOUT;
1200
1201                rc = wait_event_interruptible_timeout(chan->flushq,
1202                                                      !chan->flushing ||
1203                                                      xdev->error,
1204                                                      left_to_sleep);
1205
1206                if (xdev->error)
1207                        return xdev->error;
1208
1209                if (interruptible && rc < 0)
1210                        return -EINTR;
1211        }
1212
1213done:
1214        chan->flushed = 1;
1215        return 0;
1216}
1217
1218/* request_read_anything(): Ask the FPGA for any little amount of data */
1219static int request_read_anything(struct xillyusb_channel *chan,
1220                                 char opcode)
1221{
1222        struct xillyusb_dev *xdev = chan->xdev;
1223        unsigned int sh = chan->in_log2_element_size;
1224        int chan_num = (chan->chan_idx << 1) | 1;
1225        u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1;
1226
1227        return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh);
1228}
1229
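/*
 * xillyusb_open() allocates the per-open resources: a BULK OUT endpoint
 * with its FIFO if the file is opened for write, and an IN FIFO if it's
 * opened for read. The FPGA is then informed by flushing the (still
 * empty) downstream and by setting the initial read checkpoint,
 * respectively.
 */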
1230static int xillyusb_open(struct inode *inode, struct file *filp)
1231{
1232        struct xillyusb_dev *xdev;
1233        struct xillyusb_channel *chan;
1234        struct xillyfifo *in_fifo = NULL;
1235        struct xillyusb_endpoint *out_ep = NULL;
1236        int rc;
1237        int index;
1238
1239        rc = xillybus_find_inode(inode, (void **)&xdev, &index);
1240        if (rc)
1241                return rc;
1242
1243        chan = &xdev->channels[index];
1244        filp->private_data = chan;
1245
1246        mutex_lock(&chan->lock);
1247
1248        rc = -ENODEV;
1249
1250        if (xdev->error)
1251                goto unmutex_fail;
1252
1253        if (((filp->f_mode & FMODE_READ) && !chan->readable) ||
1254            ((filp->f_mode & FMODE_WRITE) && !chan->writable))
1255                goto unmutex_fail;
1256
1257        if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) &&
1258            chan->in_synchronous) {
1259                dev_err(xdev->dev,
1260                        "open() failed: O_NONBLOCK not allowed for read on this device\n");
1261                goto unmutex_fail;
1262        }
1263
1264        if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) &&
1265            chan->out_synchronous) {
1266                dev_err(xdev->dev,
1267                        "open() failed: O_NONBLOCK not allowed for write on this device\n");
1268                goto unmutex_fail;
1269        }
1270
1271        rc = -EBUSY;
1272
1273        if (((filp->f_mode & FMODE_READ) && chan->open_for_read) ||
1274            ((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
1275                goto unmutex_fail;
1276
1277        kref_get(&xdev->kref);
1278
1279        if (filp->f_mode & FMODE_READ)
1280                chan->open_for_read = 1;
1281
1282        if (filp->f_mode & FMODE_WRITE)
1283                chan->open_for_write = 1;
1284
1285        mutex_unlock(&chan->lock);
1286
1287        if (filp->f_mode & FMODE_WRITE) {
1288                out_ep = endpoint_alloc(xdev,
1289                                        (chan->chan_idx + 2) | USB_DIR_OUT,
1290                                        bulk_out_work, BUF_SIZE_ORDER, BUFNUM);
1291
1292                if (!out_ep) {
1293                        rc = -ENOMEM;
1294                        goto unopen;
1295                }
1296
1297                rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size);
1298
1299                if (rc)
1300                        goto late_unopen;
1301
1302                out_ep->fill_mask = -(1 << chan->out_log2_element_size);
1303                chan->out_bytes = 0;
1304                chan->flushed = 0;
1305
1306                /*
1307                 * Sending a flush request to a previously closed stream
1308                 * effectively opens it, and also waits until the command is
1309                 * confirmed by the FPGA. The latter is necessary because the
1310                 * data is sent through a separate BULK OUT endpoint, and the
1311                 * xHCI controller is free to reorder transmissions.
1312                 *
1313                 * This can't go wrong unless there's a serious hardware error
1314                 * (or the computer is stuck for 500 ms?)
1315                 */
1316                rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false);
1317
1318                if (rc == -ETIMEDOUT) {
1319                        rc = -EIO;
1320                        report_io_error(xdev, rc);
1321                }
1322
1323                if (rc)
1324                        goto late_unopen;
1325        }
1326
1327        if (filp->f_mode & FMODE_READ) {
1328                in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL);
1329
1330                if (!in_fifo) {
1331                        rc = -ENOMEM;
1332                        goto late_unopen;
1333                }
1334
1335                rc = fifo_init(in_fifo, chan->in_log2_fifo_size);
1336
1337                if (rc) {
1338                        kfree(in_fifo);
1339                        goto late_unopen;
1340                }
1341        }
1342
1343        mutex_lock(&chan->lock);
1344        if (in_fifo) {
1345                chan->in_fifo = in_fifo;
1346                chan->read_data_ok = 1;
1347        }
1348        if (out_ep)
1349                chan->out_ep = out_ep;
1350        mutex_unlock(&chan->lock);
1351
1352        if (in_fifo) {
1353                u32 in_checkpoint = 0;
1354
1355                if (!chan->in_synchronous)
1356                        in_checkpoint = in_fifo->size >>
1357                                chan->in_log2_element_size;
1358
1359                chan->in_consumed_bytes = 0;
1360                chan->poll_used = 0;
1361                chan->in_current_checkpoint = in_checkpoint;
1362                rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
1363                                          OPCODE_SET_CHECKPOINT,
1364                                          in_checkpoint);
1365
1366                if (rc) /* Failure guarantees that opcode wasn't sent */
1367                        goto unfifo;
1368
1369                /*
1370                 * In non-blocking mode, request the FPGA to send any data it
1371                 * has right away. Otherwise, the first read() will always
1372                 * return -EAGAIN, which is OK strictly speaking, but ugly.
1373                 * Checking and unrolling if this fails isn't worth the
1374                 * effort -- the error is propagated to the first read()
1375                 * anyhow.
1376                 */
1377                if (filp->f_flags & O_NONBLOCK)
1378                        request_read_anything(chan, OPCODE_SET_PUSH);
1379        }
1380
1381        return 0;
1382
1383unfifo:
1384        chan->read_data_ok = 0;
1385        safely_assign_in_fifo(chan, NULL);
1386        fifo_mem_release(in_fifo);
1387        kfree(in_fifo);
1388
1389        if (out_ep) {
1390                mutex_lock(&chan->lock);
1391                chan->out_ep = NULL;
1392                mutex_unlock(&chan->lock);
1393        }
1394
1395late_unopen:
1396        if (out_ep)
1397                endpoint_dealloc(out_ep);
1398
1399unopen:
1400        mutex_lock(&chan->lock);
1401
1402        if (filp->f_mode & FMODE_READ)
1403                chan->open_for_read = 0;
1404
1405        if (filp->f_mode & FMODE_WRITE)
1406                chan->open_for_write = 0;
1407
1408        mutex_unlock(&chan->lock);
1409
1410        kref_put(&xdev->kref, cleanup_dev);
1411
1412        return rc;
1413
1414unmutex_fail:
1415        mutex_unlock(&chan->lock);
1416        return rc;
1417}
1418
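/*
 * xillyusb_read() copies data from @chan's IN FIFO to user memory. As
 * data is consumed, OPCODE_SET_CHECKPOINT tells the FPGA how far it may
 * send without overflowing the FIFO, and OPCODE_SET_PUSH /
 * OPCODE_UPDATE_PUSH ask it to push whatever it already has. A partial
 * read is returned once XILLY_RX_TIMEOUT has passed with some data
 * already copied.
 */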
1419static ssize_t xillyusb_read(struct file *filp, char __user *userbuf,
1420                             size_t count, loff_t *f_pos)
1421{
1422        struct xillyusb_channel *chan = filp->private_data;
1423        struct xillyusb_dev *xdev = chan->xdev;
1424        struct xillyfifo *fifo = chan->in_fifo;
1425        int chan_num = (chan->chan_idx << 1) | 1;
1426
1427        long deadline, left_to_sleep;
1428        int bytes_done = 0;
1429        bool sent_set_push = false;
1430        int rc;
1431
1432        deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
1433
1434        rc = mutex_lock_interruptible(&chan->in_mutex);
1435
1436        if (rc)
1437                return rc;
1438
1439        while (1) {
1440                u32 fifo_checkpoint_bytes, complete_checkpoint_bytes;
1441                u32 complete_checkpoint, fifo_checkpoint;
1442                u32 checkpoint;
1443                s32 diff, leap;
1444                unsigned int sh = chan->in_log2_element_size;
1445                bool checkpoint_for_complete;
1446
1447                rc = fifo_read(fifo, (__force void *)userbuf + bytes_done,
1448                               count - bytes_done, xilly_copy_to_user);
1449
1450                if (rc < 0)
1451                        break;
1452
1453                bytes_done += rc;
1454                chan->in_consumed_bytes += rc;
1455
1456                left_to_sleep = deadline - ((long)jiffies);
1457
1458                /*
1459                 * Some 32-bit arithmetic that may wrap. Note that
1460                 * complete_checkpoint is rounded up to the closest element
1461                 * boundary, because the read() can't be completed otherwise.
1462                 * fifo_checkpoint_bytes is rounded down, because it protects
1463                 * in_fifo from overflowing.
1464                 */
1465
1466                fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size;
1467                complete_checkpoint_bytes =
1468                        chan->in_consumed_bytes + count - bytes_done;
1469
1470                fifo_checkpoint = fifo_checkpoint_bytes >> sh;
1471                complete_checkpoint =
1472                        (complete_checkpoint_bytes + (1 << sh) - 1) >> sh;
1473
1474                diff = (fifo_checkpoint - complete_checkpoint) << sh;
1475
1476                if (chan->in_synchronous && diff >= 0) {
1477                        checkpoint = complete_checkpoint;
1478                        checkpoint_for_complete = true;
1479                } else {
1480                        checkpoint = fifo_checkpoint;
1481                        checkpoint_for_complete = false;
1482                }
1483
1484                leap = (checkpoint - chan->in_current_checkpoint) << sh;
1485
1486                /*
1487                 * To prevent flooding of OPCODE_SET_CHECKPOINT commands as
1488                 * data is consumed, it's issued only if it moves the
1489                 * checkpoint by at least an 8th of the FIFO's size, or if
1490                 * it's necessary to complete the number of bytes requested by
1491                 * the read() call.
1492                 *
1493                 * chan->read_data_ok is checked to spare an unnecessary
 1494                 * submission after receiving EOF; however, it's harmless if
 1495                 * one slips through anyway.
1496                 */
1497
1498                if (chan->read_data_ok &&
1499                    (leap > (fifo->size >> 3) ||
1500                     (checkpoint_for_complete && leap > 0))) {
1501                        chan->in_current_checkpoint = checkpoint;
1502                        rc = xillyusb_send_opcode(xdev, chan_num,
1503                                                  OPCODE_SET_CHECKPOINT,
1504                                                  checkpoint);
1505
1506                        if (rc)
1507                                break;
1508                }
1509
1510                if (bytes_done == count ||
1511                    (left_to_sleep <= 0 && bytes_done))
1512                        break;
1513
1514                /*
1515                 * Reaching here means that the FIFO was empty when
1516                 * fifo_read() returned, but not necessarily right now. Error
1517                 * and EOF are checked and reported only now, so that no data
 1518                 * that made its way into the FIFO is lost.
1519                 */
1520
1521                if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */
1522                        /* Has data slipped into the FIFO since fifo_read()? */
1523                        smp_rmb();
1524                        if (READ_ONCE(fifo->fill))
1525                                continue;
1526
1527                        rc = 0;
1528                        break;
1529                }
1530
1531                if (xdev->error) {
1532                        rc = xdev->error;
1533                        break;
1534                }
1535
1536                if (filp->f_flags & O_NONBLOCK) {
1537                        rc = -EAGAIN;
1538                        break;
1539                }
1540
1541                if (!sent_set_push) {
1542                        rc = xillyusb_send_opcode(xdev, chan_num,
1543                                                  OPCODE_SET_PUSH,
1544                                                  complete_checkpoint);
1545
1546                        if (rc)
1547                                break;
1548
1549                        sent_set_push = true;
1550                }
1551
1552                if (left_to_sleep > 0) {
1553                        /*
1554                         * Note that when xdev->error is set (e.g. when the
1555                         * device is unplugged), read_data_ok turns zero and
 1556                         * fifo->waitq is woken up.
 1557                         * Therefore xdev->error needs no special attention here.
1558                         */
1559
1560                        rc = wait_event_interruptible_timeout
1561                                (fifo->waitq,
1562                                 fifo->fill || !chan->read_data_ok,
1563                                 left_to_sleep);
1564                } else { /* bytes_done == 0 */
1565                        /* Tell FPGA to send anything it has */
1566                        rc = request_read_anything(chan, OPCODE_UPDATE_PUSH);
1567
1568                        if (rc)
1569                                break;
1570
1571                        rc = wait_event_interruptible
1572                                (fifo->waitq,
1573                                 fifo->fill || !chan->read_data_ok);
1574                }
1575
1576                if (rc < 0) {
1577                        rc = -EINTR;
1578                        break;
1579                }
1580        }
1581
1582        if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) &&
1583            !READ_ONCE(fifo->fill))
1584                request_read_anything(chan, OPCODE_SET_PUSH);
1585
1586        mutex_unlock(&chan->in_mutex);
1587
1588        if (bytes_done)
1589                return bytes_done;
1590
1591        return rc;
1592}
1593
1594static int xillyusb_flush(struct file *filp, fl_owner_t id)
1595{
1596        struct xillyusb_channel *chan = filp->private_data;
1597        int rc;
1598
1599        if (!(filp->f_mode & FMODE_WRITE))
1600                return 0;
1601
1602        rc = mutex_lock_interruptible(&chan->out_mutex);
1603
1604        if (rc)
1605                return rc;
1606
1607        /*
1608         * One second's timeout on flushing. Interrupts are ignored, because if
 1609         * the user pressed CTRL-C, that signal is still pending by the time
 1610         * we reach here, and the opportunity to flush would be lost.
1611         */
1612        rc = flush_downstream(chan, HZ, false);
1613
1614        mutex_unlock(&chan->out_mutex);
1615
1616        if (rc == -ETIMEDOUT) {
1617                /* The things you do to use dev_warn() and not pr_warn() */
1618                struct xillyusb_dev *xdev = chan->xdev;
1619
1620                mutex_lock(&chan->lock);
1621                if (!xdev->error)
1622                        dev_warn(xdev->dev,
1623                                 "Timed out while flushing. Output data may be lost.\n");
1624                mutex_unlock(&chan->lock);
1625        }
1626
1627        return rc;
1628}
1629
1630static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf,
1631                              size_t count, loff_t *f_pos)
1632{
1633        struct xillyusb_channel *chan = filp->private_data;
1634        struct xillyusb_dev *xdev = chan->xdev;
1635        struct xillyfifo *fifo = &chan->out_ep->fifo;
1636        int rc;
1637
1638        rc = mutex_lock_interruptible(&chan->out_mutex);
1639
1640        if (rc)
1641                return rc;
1642
1643        while (1) {
1644                if (xdev->error) {
1645                        rc = xdev->error;
1646                        break;
1647                }
1648
1649                if (count == 0)
1650                        break;
1651
1652                rc = fifo_write(fifo, (__force void *)userbuf, count,
1653                                xilly_copy_from_user);
1654
1655                if (rc != 0)
1656                        break;
1657
1658                if (filp->f_flags & O_NONBLOCK) {
1659                        rc = -EAGAIN;
1660                        break;
1661                }
1662
1663                if (wait_event_interruptible
1664                    (fifo->waitq,
1665                     fifo->fill != fifo->size || xdev->error)) {
1666                        rc = -EINTR;
1667                        break;
1668                }
1669        }
1670
1671        if (rc < 0)
1672                goto done;
1673
1674        chan->out_bytes += rc;
1675
1676        if (rc) {
1677                try_queue_bulk_out(chan->out_ep);
1678                chan->flushed = 0;
1679        }
1680
1681        if (chan->out_synchronous) {
1682                int flush_rc = flush_downstream(chan, 0, true);
1683
1684                if (flush_rc && !rc)
1685                        rc = flush_rc;
1686        }
1687
1688done:
1689        mutex_unlock(&chan->out_mutex);
1690
1691        return rc;
1692}
1693
1694static int xillyusb_release(struct inode *inode, struct file *filp)
1695{
1696        struct xillyusb_channel *chan = filp->private_data;
1697        struct xillyusb_dev *xdev = chan->xdev;
1698        int rc_read = 0, rc_write = 0;
1699
1700        if (filp->f_mode & FMODE_READ) {
1701                struct xillyfifo *in_fifo = chan->in_fifo;
1702
1703                rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
1704                                               OPCODE_CLOSE, 0);
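                    /*
                     * A note on addressing (inferred from this function alone):
                     * the low bit of the message channel number selects the
                     * direction. The read (upstream) side is addressed as
                     * (chan->chan_idx << 1) | 1 here, while the write
                     * (downstream) side below uses chan->chan_idx << 1.
                     */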
1705                /*
1706                 * If rc_read is nonzero, xdev->error indicates a global
1707                 * device error. The error is reported later, so that
1708                 * resources are freed.
1709                 *
1710                 * Looping on wait_event_interruptible() somewhat defeats the
1711                 * idea of being interruptible, and this should have been
1712                 * wait_event(). However, the queue is woken with
1713                 * wake_up_interruptible() for the sake of other uses. If
1714                 * there's a global device error, chan->read_data_ok is
1715                 * deasserted and the wait queue is woken, so this case is covered.
1716                 */
1717
1718                while (wait_event_interruptible(in_fifo->waitq,
1719                                                !chan->read_data_ok))
1720                        ; /* Empty loop */
1721
1722                safely_assign_in_fifo(chan, NULL);
1723                fifo_mem_release(in_fifo);
1724                kfree(in_fifo);
1725
1726                mutex_lock(&chan->lock);
1727                chan->open_for_read = 0;
1728                mutex_unlock(&chan->lock);
1729        }
1730
1731        if (filp->f_mode & FMODE_WRITE) {
1732                struct xillyusb_endpoint *ep = chan->out_ep;
1733                /*
1734                 * chan->flushing isn't zeroed. If the pre-release flush timed
1735                 * out, a cancel request will be sent before the next
1736                 * OPCODE_SET_CHECKPOINT (i.e. when the file is opened again).
1737                 * This is done even though the FPGA forgets about the checkpoint
1738                 * request as the file closes. Still, in an exceptional race
1739                 * condition, the FPGA could send an OPCODE_REACHED_CHECKPOINT
1740                 * just before the file closes, which would reach the host after
1741                 * the file has been re-opened.
1742                 */
1743
1744                mutex_lock(&chan->lock);
1745                chan->out_ep = NULL;
1746                mutex_unlock(&chan->lock);
1747
1748                endpoint_quiesce(ep);
1749                endpoint_dealloc(ep);
1750
1751                /* See comments on rc_read above */
1752                rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
1753                                                OPCODE_CLOSE, 0);
1754
1755                mutex_lock(&chan->lock);
1756                chan->open_for_write = 0;
1757                mutex_unlock(&chan->lock);
1758        }
1759
1760        kref_put(&xdev->kref, cleanup_dev);
1761
1762        return rc_read ? rc_read : rc_write;
1763}
1764
1765/*
1766 * Xillybus' API allows device nodes to be seekable, giving the user
1767 * application access to a RAM array on the FPGA (or logic emulating it).
1768 */
1769
1770static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence)
1771{
1772        struct xillyusb_channel *chan = filp->private_data;
1773        struct xillyusb_dev *xdev = chan->xdev;
1774        loff_t pos = filp->f_pos;
1775        int rc = 0;
1776        unsigned int log2_element_size = chan->readable ?
1777                chan->in_log2_element_size : chan->out_log2_element_size;
1778
1779        /*
1780         * Take both mutexes without allowing interrupts, since it seems that
1781         * common applications don't expect an -EINTR here. Besides, concurrent
1782         * access to a single file descriptor on seekable devices is a mess
1783         * anyhow.
1784         */
1785
1786        mutex_lock(&chan->out_mutex);
1787        mutex_lock(&chan->in_mutex);
1788
1789        switch (whence) {
1790        case SEEK_SET:
1791                pos = offset;
1792                break;
1793        case SEEK_CUR:
1794                pos += offset;
1795                break;
1796        case SEEK_END:
1797                pos = offset; /* Going to the end => to the beginning */
1798                break;
1799        default:
1800                rc = -EINVAL;
1801                goto end;
1802        }
1803
1804        /* In any case, we must finish on an element boundary */
1805        if (pos & ((1 << log2_element_size) - 1)) {
1806                rc = -EINVAL;
1807                goto end;
1808        }
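            /*
             * Worked example (illustrative values): with log2_element_size == 2,
             * i.e. 4-byte elements, pos must be a multiple of 4, since the mask
             * (1 << 2) - 1 == 3 covers the two low bits. The value sent to the
             * FPGA below is then the element index, pos >> 2.
             */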
1809
1810        rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
1811                                  OPCODE_SET_ADDR,
1812                                  pos >> log2_element_size);
1813
1814        if (rc)
1815                goto end;
1816
1817        if (chan->writable) {
1818                chan->flushed = 0;
1819                rc = flush_downstream(chan, HZ, false);
1820        }
1821
1822end:
1823        mutex_unlock(&chan->out_mutex);
1824        mutex_unlock(&chan->in_mutex);
1825
1826        if (rc) /* Return error after releasing mutexes */
1827                return rc;
1828
1829        filp->f_pos = pos;
1830
1831        return pos;
1832}
1833
1834static __poll_t xillyusb_poll(struct file *filp, poll_table *wait)
1835{
1836        struct xillyusb_channel *chan = filp->private_data;
1837        __poll_t mask = 0;
1838
1839        if (chan->in_fifo)
1840                poll_wait(filp, &chan->in_fifo->waitq, wait);
1841
1842        if (chan->out_ep)
1843                poll_wait(filp, &chan->out_ep->fifo.waitq, wait);
1844
1845        /*
1846         * If this is the first time poll() is called, and the file is
1847         * readable, set the relevant flag. Also tell the FPGA to send all it
1848         * has, to kickstart the mechanism that ensures there's always some
1849         * data in in_fifo unless the stream is dry end-to-end. Note that the
1850         * first poll() may not return an EPOLLIN, even if there's data on the
1851         * FPGA. Rather, the data will arrive soon, and trigger the relevant
1852         * wait queue.
1853         */
1854
1855        if (!chan->poll_used && chan->in_fifo) {
1856                chan->poll_used = 1;
1857                request_read_anything(chan, OPCODE_SET_PUSH);
1858        }
1859
1860        /*
1861         * poll() doesn't work properly with read() channels that are
1862         * synchronous: data may have been delivered at the FPGA while the user
1863         * expects select() to wake up, which it may never do. So EPOLLIN is
1864         * never reported for synchronous read() channels.
1865         */
1866
1867        if (chan->in_fifo && !chan->in_synchronous &&
1868            (READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok))
1869                mask |= EPOLLIN | EPOLLRDNORM;
1870
1871        if (chan->out_ep &&
1872            (READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size))
1873                mask |= EPOLLOUT | EPOLLWRNORM;
1874
1875        if (chan->xdev->error)
1876                mask |= EPOLLERR;
1877
1878        return mask;
1879}
1880
1881static const struct file_operations xillyusb_fops = {
1882        .owner      = THIS_MODULE,
1883        .read       = xillyusb_read,
1884        .write      = xillyusb_write,
1885        .open       = xillyusb_open,
1886        .flush      = xillyusb_flush,
1887        .release    = xillyusb_release,
1888        .llseek     = xillyusb_llseek,
1889        .poll       = xillyusb_poll,
1890};
1891
1892static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
1893{
1894        xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
1895                                      bulk_out_work, 1, 2);
1896        if (!xdev->msg_ep)
1897                return -ENOMEM;
1898
1899        if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */
1900                goto dealloc;
1901
1902        xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */
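            /*
             * Illustration, assuming fill_mask is applied to the FIFO fill level
             * with a bitwise AND: -8 is ~7 in two's complement, so fill & -8
             * clears the three low bits, rounding the fill down to a multiple of
             * 8 bytes, e.g. 23 & -8 == 16.
             */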
1903
1904        xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN,
1905                                     bulk_in_work, BUF_SIZE_ORDER, BUFNUM);
1906        if (!xdev->in_ep)
1907                goto dealloc;
1908
1909        try_queue_bulk_in(xdev->in_ep);
1910
1911        return 0;
1912
1913dealloc:
1914        endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
1915        return -ENOMEM;
1916}
1917
1918static int setup_channels(struct xillyusb_dev *xdev,
1919                          __le16 *chandesc,
1920                          int num_channels)
1921{
1922        struct xillyusb_channel *chan;
1923        int i;
1924
1925        chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
1926        if (!chan)
1927                return -ENOMEM;
1928
1929        xdev->channels = chan;
1930
1931        for (i = 0; i < num_channels; i++, chan++) {
1932                unsigned int in_desc = le16_to_cpu(*chandesc++);
1933                unsigned int out_desc = le16_to_cpu(*chandesc++);
1934
1935                chan->xdev = xdev;
1936                mutex_init(&chan->in_mutex);
1937                mutex_init(&chan->out_mutex);
1938                mutex_init(&chan->lock);
1939                init_waitqueue_head(&chan->flushq);
1940
1941                chan->chan_idx = i;
1942
1943                if (in_desc & 0x80) { /* Entry is valid */
1944                        chan->readable = 1;
1945                        chan->in_synchronous = !!(in_desc & 0x40);
1946                        chan->in_seekable = !!(in_desc & 0x20);
1947                        chan->in_log2_element_size = in_desc & 0x0f;
1948                        chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16;
1949                }
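                    /*
                     * Worked example (illustrative value): an in_desc of 0x04e3
                     * decodes as valid (bit 7), synchronous (bit 6), seekable
                     * (bit 5), log2 element size 3 (8-byte elements), and
                     * log2 FIFO size 0x04 + 16 = 20, i.e. a 1 MiB FIFO.
                     */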
1950
1951                /*
1952                 * A downstream channel should never exist above index 13,
1953                 * as it would request a nonexistent BULK endpoint > 15.
1954                 * In the peculiar case that it does, it's ignored silently.
1955                 */
1956
1957                if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
1958                        chan->writable = 1;
1959                        chan->out_synchronous = !!(out_desc & 0x40);
1960                        chan->out_seekable = !!(out_desc & 0x20);
1961                        chan->out_log2_element_size = out_desc & 0x0f;
1962                        chan->out_log2_fifo_size =
1963                                ((out_desc >> 8) & 0x1f) + 16;
1964                }
1965        }
1966
1967        return 0;
1968}
1969
1970static int xillyusb_discovery(struct usb_interface *interface)
1971{
1972        int rc;
1973        struct xillyusb_dev *xdev = usb_get_intfdata(interface);
1974        __le16 bogus_chandesc[2];
1975        struct xillyfifo idt_fifo;
1976        struct xillyusb_channel *chan;
1977        unsigned int idt_len, names_offset;
1978        unsigned char *idt;
1979        int num_channels;
1980
1981        rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
1982
1983        if (rc) {
1984                dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n");
1985                return rc;
1986        }
1987
1988        /* Phase I: Set up one fake upstream channel and obtain IDT */
1989
1990        /* Set up a fake IDT with one async IN stream */
1991        bogus_chandesc[0] = cpu_to_le16(0x80);
1992        bogus_chandesc[1] = cpu_to_le16(0);
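            /*
             * Per the decoding in setup_channels(), 0x80 means: valid entry,
             * asynchronous (bit 6 clear), not seekable, log2 element size 0
             * (single bytes) and log2 FIFO size 0 + 16 = 16. The second word,
             * zero, leaves the downstream direction of this fake channel unused.
             */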
1993
1994        rc = setup_channels(xdev, bogus_chandesc, 1);
1995
1996        if (rc)
1997                return rc;
1998
1999        rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE);
2000
2001        if (rc)
2002                return rc;
2003
2004        chan = xdev->channels;
2005
2006        chan->in_fifo = &idt_fifo;
2007        chan->read_data_ok = 1;
2008
2009        xdev->num_channels = 1;
2010
2011        rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0);
2012
2013        if (rc) {
2014                dev_err(&interface->dev, "Failed to send IDT request. Aborting.\n");
2015                goto unfifo;
2016        }
2017
2018        rc = wait_event_interruptible_timeout(idt_fifo.waitq,
2019                                              !chan->read_data_ok,
2020                                              XILLY_RESPONSE_TIMEOUT);
2021
2022        if (xdev->error) {
2023                rc = xdev->error;
2024                goto unfifo;
2025        }
2026
2027        if (rc < 0) {
2028                rc = -EINTR; /* Interrupt on probe method? Interesting. */
2029                goto unfifo;
2030        }
2031
2032        if (chan->read_data_ok) {
2033                rc = -ETIMEDOUT;
2034                dev_err(&interface->dev, "No response from FPGA. Aborting.\n");
2035                goto unfifo;
2036        }
2037
2038        idt_len = READ_ONCE(idt_fifo.fill);
2039        idt = kmalloc(idt_len, GFP_KERNEL);
2040
2041        if (!idt) {
2042                rc = -ENOMEM;
2043                goto unfifo;
2044        }
2045
2046        fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy);
2047
2048        if (crc32_le(~0, idt, idt_len) != 0) {
2049                dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n");
2050                rc = -ENODEV;
2051                goto unidt;
2052        }
2053
2054        if (*idt > 0x90) {
2055                dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n",
2056                        (int)*idt);
2057                rc = -ENODEV;
2058                goto unidt;
2059        }
2060
2061        /* Phase II: Set up the streams as defined in IDT */
2062
2063        num_channels = le16_to_cpu(*((__le16 *)(idt + 1)));
2064        names_offset = 3 + num_channels * 4;
2065        idt_len -= 4; /* Exclude CRC */
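            /*
             * As consumed here, the IDT is laid out as one version byte, a
             * little-endian 16-bit channel count, 4 bytes (two 16-bit
             * descriptors) per channel, the device file names, and a trailing
             * 32-bit CRC. For example, with 3 channels the names begin at
             * offset 3 + 3 * 4 = 15.
             */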
2066
2067        if (idt_len < names_offset) {
2068                dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n");
2069                rc = -ENODEV;
2070                goto unidt;
2071        }
2072
2073        rc = setup_channels(xdev, (void *)idt + 3, num_channels);
2074
2075        if (rc)
2076                goto unidt;
2077
2078        /*
2079         * Except for wildly misbehaving hardware, or if it was disconnected
2080         * just after responding with the IDT, there is no reason for any
2081         * work item to be running now. To make sure that the update of
2082         * xdev->channels is visible to anything that might run in parallel,
2083         * flush the workqueue, which rarely has anything to do.
2084         */
2085        flush_workqueue(xdev->workq);
2086
2087        xdev->num_channels = num_channels;
2088
2089        fifo_mem_release(&idt_fifo);
2090        kfree(chan);
2091
2092        rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops,
2093                                  THIS_MODULE, xdev,
2094                                  idt + names_offset,
2095                                  idt_len - names_offset,
2096                                  num_channels,
2097                                  xillyname, true);
2098
2099        kfree(idt);
2100
2101        return rc;
2102
2103unidt:
2104        kfree(idt);
2105
2106unfifo:
2107        safely_assign_in_fifo(chan, NULL);
2108        fifo_mem_release(&idt_fifo);
2109
2110        return rc;
2111}
2112
2113static int xillyusb_probe(struct usb_interface *interface,
2114                          const struct usb_device_id *id)
2115{
2116        struct xillyusb_dev *xdev;
2117        int rc;
2118
2119        xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
2120        if (!xdev)
2121                return -ENOMEM;
2122
2123        kref_init(&xdev->kref);
2124        mutex_init(&xdev->process_in_mutex);
2125        mutex_init(&xdev->msg_mutex);
2126
2127        xdev->udev = usb_get_dev(interface_to_usbdev(interface));
2128        xdev->dev = &interface->dev;
2129        xdev->error = 0;
2130        spin_lock_init(&xdev->error_lock);
2131        xdev->in_counter = 0;
2132        xdev->in_bytes_left = 0;
2133        xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
2134
2135        if (!xdev->workq) {
2136                dev_err(&interface->dev, "Failed to allocate work queue\n");
2137                rc = -ENOMEM;
2138                goto fail;
2139        }
2140
2141        INIT_WORK(&xdev->wakeup_workitem, wakeup_all);
2142
2143        usb_set_intfdata(interface, xdev);
2144
2145        rc = xillyusb_setup_base_eps(xdev);
2146        if (rc)
2147                goto fail;
2148
2149        rc = xillyusb_discovery(interface);
2150        if (rc)
2151                goto latefail;
2152
2153        return 0;
2154
2155latefail:
2156        endpoint_quiesce(xdev->in_ep);
2157        endpoint_quiesce(xdev->msg_ep);
2158
2159fail:
2160        usb_set_intfdata(interface, NULL);
2161        kref_put(&xdev->kref, cleanup_dev);
2162        return rc;
2163}
2164
2165static void xillyusb_disconnect(struct usb_interface *interface)
2166{
2167        struct xillyusb_dev *xdev = usb_get_intfdata(interface);
2168        struct xillyusb_endpoint *msg_ep = xdev->msg_ep;
2169        struct xillyfifo *fifo = &msg_ep->fifo;
2170        int rc;
2171        int i;
2172
2173        xillybus_cleanup_chrdev(xdev, &interface->dev);
2174
2175        /*
2176         * Try to send OPCODE_QUIESCE, which will fail silently if the device
2177         * was disconnected, but makes sense on module unload.
2178         */
2179
2180        msg_ep->wake_on_drain = true;
2181        xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
2182
2183        /*
2184         * If the device has been disconnected, sending the opcode sets a
2185         * global device error in xdev->error, if such an error didn't occur
2186         * earlier. Hence timing out means that the USB link is fine, but
2187         * somehow the message wasn't sent. This should never happen.
2188         */
2189
2190        rc = wait_event_interruptible_timeout(fifo->waitq,
2191                                              msg_ep->drained || xdev->error,
2192                                              XILLY_RESPONSE_TIMEOUT);
2193
2194        if (!rc)
2195                dev_err(&interface->dev,
2196                        "Weird timeout condition on sending quiesce request.\n");
2197
2198        report_io_error(xdev, -ENODEV); /* Discourage further activity */
2199
2200        /*
2201         * This device driver is declared with soft_unbind set, or else
2202         * sending OPCODE_QUIESCE above would always fail. The price is
2203         * that the USB framework didn't kill outstanding URBs, so it has
2204         * to be done explicitly before returning from this call.
2205         */
2206
2207        for (i = 0; i < xdev->num_channels; i++) {
2208                struct xillyusb_channel *chan = &xdev->channels[i];
2209
2210                /*
2211                 * Lock taken to prevent chan->out_ep from changing. It also
2212                 * ensures xillyusb_open() and xillyusb_flush() don't access
2213                 * xdev->dev after being nullified below.
2214                 */
2215                mutex_lock(&chan->lock);
2216                if (chan->out_ep)
2217                        endpoint_quiesce(chan->out_ep);
2218                mutex_unlock(&chan->lock);
2219        }
2220
2221        endpoint_quiesce(xdev->in_ep);
2222        endpoint_quiesce(xdev->msg_ep);
2223
2224        usb_set_intfdata(interface, NULL);
2225
2226        xdev->dev = NULL;
2227
2228        kref_put(&xdev->kref, cleanup_dev);
2229}
2230
2231static struct usb_driver xillyusb_driver = {
2232        .name = xillyname,
2233        .id_table = xillyusb_table,
2234        .probe = xillyusb_probe,
2235        .disconnect = xillyusb_disconnect,
2236        .soft_unbind = 1,
2237};
2238
2239static int __init xillyusb_init(void)
2240{
2241        int rc = 0;
2242
2243        if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
2244                fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
2245        else
2246                fifo_buf_order = 0;
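            /*
             * Worked example: with 4 KiB pages (PAGE_SHIFT == 12, typical) and
             * LOG2_INITIAL_FIFO_BUF_SIZE == 16, fifo_buf_order becomes 4, which
             * presumably means initial FIFO buffers of 2^4 = 16 pages = 64 KiB
             * each.
             */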
2247
2248        rc = usb_register(&xillyusb_driver);
2249
2250        return rc;
2251}
2252
2253static void __exit xillyusb_exit(void)
2254{
2255        usb_deregister(&xillyusb_driver);
2256}
2257
2258module_init(xillyusb_init);
2259module_exit(xillyusb_exit);
2260