linux/drivers/usb/host/whci/qset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
        struct whc_qset *qset;
        dma_addr_t dma;

        qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
        if (qset == NULL)
                return NULL;

        qset->qset_dma = dma;
        qset->whc = whc;

        INIT_LIST_HEAD(&qset->list_node);
        INIT_LIST_HEAD(&qset->stds);

        return qset;
}
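
/*
 * Illustrative sketch (assumed caller, not part of this file): qsets
 * are allocated from the whc->qset_pool DMA pool, so a typical caller
 * in atomic context would do:
 *
 *	struct whc_qset *qset = qset_alloc(whc, GFP_ATOMIC);
 *	if (qset == NULL)
 *		return -ENOMEM;
 */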

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @whc:  the WHCI host controller
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
        struct usb_device *usb_dev = urb->dev;
        struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
        bool is_out;
        uint8_t phy_rate;

        is_out = usb_pipeout(urb->pipe);

        qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

        epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
        if (epcd) {
                qset->max_seq = epcd->bMaxSequence;
                qset->max_burst = epcd->bMaxBurst;
        } else {
                qset->max_seq = 2;
                qset->max_burst = 1;
        }

        /*
         * Initial PHY rate is 53.3 Mbit/s for control endpoints or
         * the maximum supported by the device for other endpoints
         * (unless limited by the user).
         */
        if (usb_pipecontrol(urb->pipe)) {
                phy_rate = UWB_PHY_RATE_53;
        } else {
                uint16_t phy_rates;

                phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
                phy_rate = fls(phy_rates) - 1;
                if (phy_rate > whc->wusbhc.phy_rate)
                        phy_rate = whc->wusbhc.phy_rate;
        }

        qset->qh.info1 = cpu_to_le32(
                QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
                | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
                | usb_pipe_to_qh_type(urb->pipe)
                | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
                | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
                );
        qset->qh.info2 = cpu_to_le32(
                QH_INFO2_BURST(qset->max_burst)
                | QH_INFO2_DBP(0)
                | QH_INFO2_MAX_COUNT(3)
                | QH_INFO2_MAX_RETRY(3)
                | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
                );
        /* FIXME: where can we obtain these Tx parameters from?  Why
         * doesn't the chip know what Tx power to use? It knows the Rx
         * strength and can presumably guess the Tx power required
         * from that? */
        qset->qh.info3 = cpu_to_le32(
                QH_INFO3_TX_RATE(phy_rate)
                | QH_INFO3_TX_PWR(0) /* 0 == max power */
                );

        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}
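
/*
 * Worked example (illustrative): a device reporting wPHYRates == 0x001f
 * supports rate indices 0-4 (up to 200 Mbit/s), so fls(0x001f) - 1 == 4
 * is chosen for a non-control endpoint, clamped to whc->wusbhc.phy_rate
 * if the user configured a lower limit.  With max_burst == 4 the
 * initial current window is (1 << 4) - 1 == 0xf.
 */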

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
        qset->td_start = qset->td_end = qset->ntds = 0;

        qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status &= QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
        qset->qh.scratch[1] = 0;
        qset->qh.scratch[2] = 0;

        memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

        init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
        qset->reset = 0;

        qset->qh.status &= ~QH_STATUS_SEQ_MASK;
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
                                 gfp_t mem_flags)
{
        struct whc_qset *qset;

        qset = urb->ep->hcpriv;
        if (qset == NULL) {
                qset = qset_alloc(whc, mem_flags);
                if (qset == NULL)
                        return NULL;

                qset->ep = urb->ep;
                urb->ep->hcpriv = qset;
                qset_fill_qh(whc, qset, urb);
        }
        return qset;
}
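
/*
 * Illustrative sketch (assumed caller, not part of this file): the URB
 * enqueue path would typically look up or create the qset under the HC
 * lock before adding the transfer, e.g.:
 *
 *	spin_lock_irqsave(&whc->lock, flags);
 *	qset = get_qset(whc, urb, GFP_ATOMIC);
 *	if (qset == NULL)
 *		err = -ENOMEM;
 *	else
 *		err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
 *	spin_unlock_irqrestore(&whc->lock, flags);
 */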

void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
        qset->remove = 0;
        list_del_init(&qset->list_node);
        complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns WHC_UPDATE_UPDATED if the list (ASL/PZL) must be updated
 * because (for a WHCI 0.95 controller) an activated qTD was pointed
 * to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
        struct whc_std *std;
        enum whc_update update = 0;

        list_for_each_entry(std, &qset->stds, list_node) {
                struct whc_qtd *qtd;
                uint32_t status;

                if (qset->ntds >= WHCI_QSET_TD_MAX
                    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
                        break;

                if (std->qtd)
                        continue; /* already has a qTD */

                qtd = std->qtd = &qset->qtd[qset->td_end];

                /* Fill in setup bytes for control transfers. */
                if (usb_pipecontrol(std->urb->pipe))
                        memcpy(qtd->setup, std->urb->setup_packet, 8);

                status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

                if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
                        status |= QTD_STS_LAST_PKT;

                /*
                 * For an IN transfer the iAlt field should be set so
                 * the h/w will automatically advance to the next
                 * transfer. However, if there are 8 or more TDs
                 * remaining in this transfer then iAlt cannot be set
                 * as it could point to somewhere in this transfer.
                 */
                if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
                        int ialt;
                        ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
                        status |= QTD_STS_IALT(ialt);
                } else if (usb_pipein(std->urb->pipe))
                        qset->pause_after_urb = std->urb;

                if (std->num_pointers)
                        qtd->options = cpu_to_le32(QTD_OPT_IOC);
                else
                        qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
                qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

                qtd->status = cpu_to_le32(status);

                if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
                        update = WHC_UPDATE_UPDATED;

                if (++qset->td_end >= WHCI_QSET_TD_MAX)
                        qset->td_end = 0;
                qset->ntds++;
        }

        return update;
}
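
/*
 * Worked example (illustrative): with WHCI_QSET_TD_MAX == 8, suppose a
 * transfer has 3 qTDs left (std->ntds_remaining == 3) and the next free
 * slot is td_end == 6.  Then iAlt = (6 + 3) % 8 == 1: the first slot
 * after this transfer, so the hardware can advance past it
 * automatically.  With 8 or more qTDs remaining the modulo would wrap
 * back into the same transfer, which is why iAlt is left unset and the
 * qset is paused after the URB instead.
 */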

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
        qset->qtd[qset->td_start].status = 0;

        if (++qset->td_start >= WHCI_QSET_TD_MAX)
                qset->td_start = 0;
        qset->ntds--;
}
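
/*
 * Illustrative note: td_start and td_end index qset->qtd[] as a ring of
 * WHCI_QSET_TD_MAX (8) slots, so removing a qTD when td_start == 7
 * wraps td_start back to slot 0; the occupancy count is kept separately
 * in qset->ntds.
 */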

static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
        struct scatterlist *sg;
        void *bounce;
        size_t remaining, offset;

        bounce = std->bounce_buf;
        remaining = std->len;

        sg = std->bounce_sg;
        offset = std->bounce_offset;

        while (remaining) {
                size_t len;

                len = min(sg->length - offset, remaining);
                memcpy(sg_virt(sg) + offset, bounce, len);

                bounce += len;
                remaining -= len;

                offset += len;
                if (offset >= sg->length) {
                        sg = sg_next(sg);
                        offset = 0;
                }
        }
}
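
/*
 * Worked example (illustrative): for an sTD that bounced 6000 bytes
 * into an sg list whose first element holds 4096 bytes (bounce_offset
 * == 0), the loop copies 4096 bytes into the first element, steps to
 * the next element with sg_next(), and copies the remaining 1904 bytes
 * at offset 0 there (assuming that element is large enough).
 */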

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
        list_del(&std->list_node);
        if (std->bounce_buf) {
                bool is_out = usb_pipeout(std->urb->pipe);
                dma_addr_t dma_addr;

                if (std->num_pointers)
                        dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
                else
                        dma_addr = std->dma_addr;

                dma_unmap_single(whc->wusbhc.dev, dma_addr,
                                 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (!is_out)
                        qset_copy_bounce_to_sg(whc, std);
                kfree(std->bounce_buf);
        }
        if (std->pl_virt) {
                if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
                        dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
                                         std->num_pointers * sizeof(struct whc_page_list_entry),
                                         DMA_TO_DEVICE);
                kfree(std->pl_virt);
                std->pl_virt = NULL;
        }
        kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
                             struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb != urb)
                        break;
                if (std->qtd != NULL)
                        qset_remove_qtd(whc, qset);
                qset_free_std(whc, std);
        }
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb == urb)
                        qset_free_std(qset->whc, std);
        }
}

static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
        dma_addr_t dma_addr = std->dma_addr;
        dma_addr_t sp, ep;
        size_t pl_len;
        int p;

        /* Short buffers don't need a page list. */
        if (std->len <= WHCI_PAGE_SIZE) {
                std->num_pointers = 0;
                return 0;
        }

        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
        ep = dma_addr + std->len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
        std->pl_virt = kmalloc(pl_len, mem_flags);
        if (std->pl_virt == NULL)
                return -ENOMEM;
        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
                kfree(std->pl_virt);
                std->pl_virt = NULL; /* avoid a double free in qset_free_std() */
                return -EFAULT;
        }

        for (p = 0; p < std->num_pointers; p++) {
                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
        }

        return 0;
}
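
/*
 * Worked example (illustrative): with WHCI_PAGE_SIZE == 4096, a 6000
 * byte buffer whose DMA address is 0x12345800 gives sp == 0x12345000
 * and ep == 0x12346f70, so num_pointers == DIV_ROUND_UP(0x1f70, 4096)
 * == 2 page-list entries: 0x12345800 (the unaligned start) and
 * 0x12346000 (the following page).
 */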

/**
 * urb_dequeue_work - execute the ASL/PZL update and give the urb back to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
        struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
        struct whc_qset *qset = wurb->qset;
        struct whc *whc = qset->whc;
        unsigned long flags;

        if (wurb->is_async)
                asl_update(whc, WUSBCMD_ASYNC_UPDATED
                           | WUSBCMD_ASYNC_SYNCED_DB
                           | WUSBCMD_ASYNC_QSET_RM);
        else
                pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
                           | WUSBCMD_PERIODIC_SYNCED_DB
                           | WUSBCMD_PERIODIC_QSET_RM);

        spin_lock_irqsave(&whc->lock, flags);
        qset_remove_urb(whc, qset, wurb->urb, wurb->status);
        spin_unlock_irqrestore(&whc->lock, flags);
}

static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
                                    struct urb *urb, gfp_t mem_flags)
{
        struct whc_std *std;

        std = kzalloc(sizeof(struct whc_std), mem_flags);
        if (std == NULL)
                return NULL;

        std->urb = urb;
        std->qtd = NULL;

        INIT_LIST_HEAD(&std->list_node);
        list_add_tail(&std->list_node, &qset->stds);

        return std;
}

static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
                           gfp_t mem_flags)
{
        size_t remaining;
        struct scatterlist *sg;
        int i;
        int ntds = 0;
        struct whc_std *std = NULL;
        struct whc_page_list_entry *new_pl_virt;
        dma_addr_t prev_end = 0;
        size_t pl_len;
        int p = 0;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                dma_addr_t dma_addr;
                size_t dma_remaining;
                dma_addr_t sp, ep;
                int num_pointers;

                if (remaining == 0)
                        break;

                dma_addr = sg_dma_address(sg);
                dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

                while (dma_remaining) {
                        size_t dma_len;

                        /*
                         * We can use the previous std (if it exists) provided that:
                         * - the previous one ended on a page boundary.
                         * - the current one begins on a page boundary.
                         * - the previous one isn't full.
                         *
                         * If a new std is needed but the previous one
                         * was not a whole number of packets then this
                         * sg list cannot be mapped onto multiple
                         * qTDs.  Return an error and let the caller
                         * sort it out.
                         */
                        if (!std
                            || (prev_end & (WHCI_PAGE_SIZE-1))
                            || (dma_addr & (WHCI_PAGE_SIZE-1))
                            || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
                                if (std && std->len % qset->max_packet != 0)
                                        return -EINVAL;
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                ntds++;
                                p = 0;
                        }

                        dma_len = dma_remaining;

                        /*
                         * If the remainder of this element doesn't
                         * fit in a single qTD, limit the qTD to a
                         * whole number of packets.  This allows the
                         * remainder to go into the next qTD.
                         */
                        if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
                                dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
                                        * qset->max_packet - std->len;
                        }

                        std->len += dma_len;
                        std->ntds_remaining = -1; /* filled in later */

                        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
                        ep = dma_addr + dma_len;
                        num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
                        std->num_pointers += num_pointers;

                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

                        new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
                        if (new_pl_virt == NULL) {
                                kfree(std->pl_virt);
                                std->pl_virt = NULL;
                                return -ENOMEM;
                        }
                        std->pl_virt = new_pl_virt;

                        for (; p < std->num_pointers; p++) {
                                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
                        }

                        prev_end = dma_addr = ep;
                        dma_remaining -= dma_len;
                        remaining -= dma_len;
                }
        }

        /*
         * Now the number of sTDs is known, go back and fill in
         * std->ntds_remaining.
         */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining == -1) {
                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
                        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
                                                       pl_len, DMA_TO_DEVICE);
                        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
                                return -EFAULT;
                        std->ntds_remaining = ntds--;
                }
        }
        return 0;
}
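
/*
 * Illustrative note: the -EINVAL above tells the caller
 * (qset_add_urb()) to fall back to qset_add_urb_sg_linearize().  For
 * example, an sg element ending in the middle of a page forces the next
 * element into a new sTD; if the bytes accumulated so far are not a
 * whole number of max_packet-sized packets, the sTD boundary would fall
 * mid-packet, so the sg list cannot be mapped directly.
 */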

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
                                     struct urb *urb, gfp_t mem_flags)
{
        bool is_out = usb_pipeout(urb->pipe);
        size_t max_std_len;
        size_t remaining;
        int ntds = 0;
        struct whc_std *std = NULL;
        void *bounce = NULL;
        struct scatterlist *sg;
        int i;

        /* Limit each bounce buffer to one burst: at most 16 * 3.5 KiB = 56 KiB. */
        max_std_len = qset->max_burst * qset->max_packet;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                size_t len;
                size_t sg_remaining;
                void *orig;

                if (remaining == 0)
                        break;

                sg_remaining = min_t(size_t, remaining, sg->length);
                orig = sg_virt(sg);

                while (sg_remaining) {
                        if (!std || std->len == max_std_len) {
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                std->bounce_buf = kmalloc(max_std_len, mem_flags);
                                if (std->bounce_buf == NULL)
                                        return -ENOMEM;
                                std->bounce_sg = sg;
                                std->bounce_offset = orig - sg_virt(sg);
                                bounce = std->bounce_buf;
                                ntds++;
                        }

                        len = min(sg_remaining, max_std_len - std->len);

                        if (is_out)
                                memcpy(bounce, orig, len);

                        std->len += len;
                        std->ntds_remaining = -1; /* filled in later */

                        bounce += len;
                        orig += len;
                        sg_remaining -= len;
                        remaining -= len;
                }
        }

        /*
         * For each of the new sTDs, map the bounce buffers, create
         * page lists (if necessary), and fill in std->ntds_remaining.
         */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining != -1)
                        continue;

                std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
                                               is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
                        return -EFAULT;

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        return -ENOMEM;

                std->ntds_remaining = ntds--;
        }

        return 0;
}
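
/*
 * Worked example (illustrative): an endpoint with max_packet == 512 and
 * max_burst == 4 gives max_std_len == 2048, so a 5000 byte URB is
 * linearized into three bounce buffers of 2048, 2048 and 904 bytes,
 * each becoming one sTD regardless of how the original sg elements were
 * laid out.
 */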

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
        gfp_t mem_flags)
{
        struct whc_urb *wurb;
        int remaining = urb->transfer_buffer_length;
        u64 transfer_dma = urb->transfer_dma;
        int ntds_remaining;
        int ret;

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
        if (wurb == NULL)
                goto err_no_mem;
        urb->hcpriv = wurb;
        wurb->qset = qset;
        wurb->urb = urb;
        INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

        if (urb->num_sgs) {
                ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
                if (ret == -EINVAL) {
                        qset_free_stds(qset, urb);
                        ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
                }
                if (ret < 0)
                        goto err_no_mem;
                return 0;
        }

        ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
        if (ntds_remaining == 0)
                ntds_remaining = 1;

        while (ntds_remaining) {
                struct whc_std *std;
                size_t std_len;

                std_len = remaining;
                if (std_len > QTD_MAX_XFER_SIZE)
                        std_len = QTD_MAX_XFER_SIZE;

                std = qset_new_std(whc, qset, urb, mem_flags);
                if (std == NULL)
                        goto err_no_mem;

                std->dma_addr = transfer_dma;
                std->len = std_len;
                std->ntds_remaining = ntds_remaining;

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        goto err_no_mem;

                ntds_remaining--;
                remaining -= std_len;
                transfer_dma += std_len;
        }

        return 0;

err_no_mem:
        qset_free_stds(qset, urb);
        return -ENOMEM;
}
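
/*
 * Illustrative note: for a non-sg transfer of 2.5 * QTD_MAX_XFER_SIZE
 * bytes, DIV_ROUND_UP() yields three sTDs with ntds_remaining of 3, 2
 * and 1, the last sTD carrying the final half-sized chunk; a
 * zero-length transfer still gets one empty sTD so that a qTD exists
 * to complete.
 */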

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
                            struct urb *urb, int status)
{
        struct wusbhc *wusbhc = &whc->wusbhc;
        struct whc_urb *wurb = urb->hcpriv;

        usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
        /* Drop the lock as urb->complete() may enqueue another urb. */
        spin_unlock(&whc->lock);
        wusbhc_giveback_urb(wusbhc, urb, status);
        spin_lock(&whc->lock);

        kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
        if (status & QTD_STS_HALTED) {
                if (status & QTD_STS_DBE)
                        return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
                else if (status & QTD_STS_BABBLE)
                        return -EOVERFLOW;
                else if (status & QTD_STS_RCE)
                        return -ETIME;
                return -EPIPE;
        }
        if (usb_pipein(urb->pipe)
            && (urb->transfer_flags & URB_SHORT_NOT_OK)
            && urb->actual_length < urb->transfer_buffer_length)
                return -EREMOTEIO;
        return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb is completely transferred or (in the case of an IN only) the
 * last packet flag is set, then the transfer is complete and the urb
 * should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
                                 struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        uint32_t status;
        bool complete;

        status = le32_to_cpu(qtd->status);

        urb->actual_length += std->len - QTD_STS_TO_LEN(status);

        if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
                complete = true;
        else
                complete = whc_std_last(std);

        qset_remove_qtd(whc, qset);
        qset_free_std(whc, std);

        /*
         * If the transfers for this URB are complete, return it to the
         * USB subsystem.
         */
        if (complete) {
                qset_remove_qtds(whc, qset, urb);
                qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

                /*
                 * If iAlt isn't valid then the hardware didn't
                 * advance iCur. Adjust the start and end pointers to
                 * match iCur.
                 */
                if (!(status & QTD_STS_IALT_VALID))
                        qset->td_start = qset->td_end
                                = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
                qset->pause_after_urb = NULL;
        }
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
                               struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        int urb_status;

        urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

        qset_remove_qtds(whc, qset, urb);
        qset_remove_urb(whc, qset, urb, urb_status);

        list_for_each_entry(std, &qset->stds, list_node) {
                if (qset->ntds == 0)
                        break;
                qset_remove_qtd(whc, qset);
                std->qtd = NULL;
        }

        qset->remove = 1;
}

void qset_free(struct whc *whc, struct whc_qset *qset)
{
        dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);
        qset_free(whc, qset);
}