linux/drivers/usb/host/whci/qset.c
/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

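/**
 * qset_alloc - allocate a qset from the host controller's DMA pool.
 *
 * The returned qset is zeroed (the hardware reads it directly) and
 * its own DMA address is recorded for later linking and freeing.
 */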
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
        struct whc_qset *qset;
        dma_addr_t dma;

        qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
        if (qset == NULL)
                return NULL;
        memset(qset, 0, sizeof(struct whc_qset));

        qset->qset_dma = dma;
        qset->whc = whc;

        INIT_LIST_HEAD(&qset->list_node);
        INIT_LIST_HEAD(&qset->stds);

        return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @whc:  the WHCI host controller
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
        struct usb_device *usb_dev = urb->dev;
        struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
        bool is_out;
        uint8_t phy_rate;

        is_out = usb_pipeout(urb->pipe);

        qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

        epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
        if (epcd) {
                qset->max_seq = epcd->bMaxSequence;
                qset->max_burst = epcd->bMaxBurst;
        } else {
                qset->max_seq = 2;
                qset->max_burst = 1;
        }

        /*
         * Initial PHY rate is 53.3 Mbit/s for control endpoints or
         * the maximum supported by the device for other endpoints
         * (unless limited by the user).
         */
        if (usb_pipecontrol(urb->pipe)) {
                phy_rate = UWB_PHY_RATE_53;
        } else {
                uint16_t phy_rates;

                phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
                phy_rate = fls(phy_rates) - 1;
                if (phy_rate > whc->wusbhc.phy_rate)
                        phy_rate = whc->wusbhc.phy_rate;
        }

        qset->qh.info1 = cpu_to_le32(
                QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
                | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
                | usb_pipe_to_qh_type(urb->pipe)
                | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
                | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
                );
        qset->qh.info2 = cpu_to_le32(
                QH_INFO2_BURST(qset->max_burst)
                | QH_INFO2_DBP(0)
                | QH_INFO2_MAX_COUNT(3)
                | QH_INFO2_MAX_RETRY(3)
                | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
                );
        /* FIXME: where can we obtain these Tx parameters from?  Why
         * doesn't the chip know what Tx power to use? It knows the Rx
         * strength and can presumably guess the Tx power required
         * from that? */
        qset->qh.info3 = cpu_to_le32(
                QH_INFO3_TX_RATE(phy_rate)
                | QH_INFO3_TX_PWR(0) /* 0 == max power */
                );

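        /* Open the transfer window: one bit set per packet in a maximum burst. */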
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
        qset->td_start = qset->td_end = qset->ntds = 0;

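        /* Not yet (re)linked into the ASL/PZL: terminate the link pointer. */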
        qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
        qset->qh.scratch[1] = 0;
        qset->qh.scratch[2] = 0;

        memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

        init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
        qset->reset = 0;

        qset->qh.status &= ~QH_STATUS_SEQ_MASK;
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
                                 gfp_t mem_flags)
{
        struct whc_qset *qset;

        qset = urb->ep->hcpriv;
        if (qset == NULL) {
                qset = qset_alloc(whc, mem_flags);
                if (qset == NULL)
                        return NULL;

                qset->ep = urb->ep;
                urb->ep->hcpriv = qset;
                qset_fill_qh(whc, qset, urb);
        }
        return qset;
}

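/**
 * qset_remove_complete - finish removing a qset.
 *
 * Clear the remove flag, unlink the qset from its schedule list and
 * wake up anyone waiting in qset_delete().
 */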
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
        qset->remove = 0;
        list_del_init(&qset->list_node);
        complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
        struct whc_std *std;
        enum whc_update update = 0;

        list_for_each_entry(std, &qset->stds, list_node) {
                struct whc_qtd *qtd;
                uint32_t status;

                if (qset->ntds >= WHCI_QSET_TD_MAX
                    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
                        break;

                if (std->qtd)
                        continue; /* already has a qTD */

                qtd = std->qtd = &qset->qtd[qset->td_end];

                /* Fill in setup bytes for control transfers. */
                if (usb_pipecontrol(std->urb->pipe))
                        memcpy(qtd->setup, std->urb->setup_packet, 8);

                status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

                if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
                        status |= QTD_STS_LAST_PKT;

                /*
                 * For an IN transfer the iAlt field should be set so
                 * the h/w will automatically advance to the next
                 * transfer. However, if there are 8 or more TDs
                 * remaining in this transfer then iAlt cannot be set
                 * as it could point to somewhere in this transfer.
                 */
                if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
                        int ialt;
                        ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
                        status |= QTD_STS_IALT(ialt);
                } else if (usb_pipein(std->urb->pipe)) {
                        qset->pause_after_urb = std->urb;
                }

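                /*
                 * A "small" qTD has no page list: its pointer field
                 * refers directly to the data buffer.
                 */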
                if (std->num_pointers)
                        qtd->options = cpu_to_le32(QTD_OPT_IOC);
                else
                        qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
                qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

                qtd->status = cpu_to_le32(status);

                if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
                        update = WHC_UPDATE_UPDATED;

                if (++qset->td_end >= WHCI_QSET_TD_MAX)
                        qset->td_end = 0;
                qset->ntds++;
        }

        return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
        qset->qtd[qset->td_start].status = 0;

        if (++qset->td_start >= WHCI_QSET_TD_MAX)
                qset->td_start = 0;
        qset->ntds--;
}

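/**
 * qset_copy_bounce_to_sg - copy data received into a bounce buffer
 * back into the urb's scatter-gather list.
 */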
static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
        struct scatterlist *sg;
        void *bounce;
        size_t remaining, offset;

        bounce = std->bounce_buf;
        remaining = std->len;

        sg = std->bounce_sg;
        offset = std->bounce_offset;

        while (remaining) {
                size_t len;

                len = min(sg->length - offset, remaining);
                memcpy(sg_virt(sg) + offset, bounce, len);

                bounce += len;
                remaining -= len;

                offset += len;
                if (offset >= sg->length) {
                        sg = sg_next(sg);
                        offset = 0;
                }
        }
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
        list_del(&std->list_node);
        if (std->bounce_buf) {
                bool is_out = usb_pipeout(std->urb->pipe);
                dma_addr_t dma_addr;

                if (std->num_pointers)
                        dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
                else
                        dma_addr = std->dma_addr;

                dma_unmap_single(whc->wusbhc.dev, dma_addr,
                                 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (!is_out)
                        qset_copy_bounce_to_sg(whc, std);
                kfree(std->bounce_buf);
        }
        if (std->pl_virt) {
                if (std->dma_addr)
                        dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
                                         std->num_pointers * sizeof(struct whc_page_list_entry),
                                         DMA_TO_DEVICE);
                kfree(std->pl_virt);
                std->pl_virt = NULL;
        }
        kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
                             struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb != urb)
                        break;
                if (std->qtd != NULL)
                        qset_remove_qtd(whc, qset);
                qset_free_std(whc, std);
        }
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb == urb)
                        qset_free_std(qset->whc, std);
        }
}

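/**
 * qset_fill_page_list - create a page list for an sTD whose buffer
 * spans more than one WHCI page.
 */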
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
        dma_addr_t dma_addr = std->dma_addr;
        dma_addr_t sp, ep;
        size_t pl_len;
        int p;

        /* Short buffers don't need a page list. */
        if (std->len <= WHCI_PAGE_SIZE) {
                std->num_pointers = 0;
                return 0;
        }

        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
        ep = dma_addr + std->len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
        std->pl_virt = kmalloc(pl_len, mem_flags);
        if (std->pl_virt == NULL)
                return -ENOMEM;
        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
        /* Streaming DMA mappings can fail and must be checked. */
        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
                kfree(std->pl_virt);
                std->pl_virt = NULL;
                return -EFAULT;
        }

        for (p = 0; p < std->num_pointers; p++) {
                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
        }

        return 0;
}

/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
        struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
        struct whc_qset *qset = wurb->qset;
        struct whc *whc = qset->whc;
        unsigned long flags;

        if (wurb->is_async)
                asl_update(whc, WUSBCMD_ASYNC_UPDATED
                           | WUSBCMD_ASYNC_SYNCED_DB
                           | WUSBCMD_ASYNC_QSET_RM);
        else
                pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
                           | WUSBCMD_PERIODIC_SYNCED_DB
                           | WUSBCMD_PERIODIC_QSET_RM);

        spin_lock_irqsave(&whc->lock, flags);
        qset_remove_urb(whc, qset, wurb->urb, wurb->status);
        spin_unlock_irqrestore(&whc->lock, flags);
}

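/**
 * qset_new_std - allocate a new sTD and add it to the tail of the
 * qset's sTD list.
 */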
static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
                                    struct urb *urb, gfp_t mem_flags)
{
        struct whc_std *std;

        std = kzalloc(sizeof(struct whc_std), mem_flags);
        if (std == NULL)
                return NULL;

        std->urb = urb;
        std->qtd = NULL;

        INIT_LIST_HEAD(&std->list_node);
        list_add_tail(&std->list_node, &qset->stds);

        return std;
}

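/**
 * qset_add_urb_sg - add an urb with a scatter-gather list, mapping
 * the sg elements directly onto qTD page lists where possible.
 *
 * Returns -EINVAL if this cannot be done without bounce buffers, in
 * which case the caller falls back to qset_add_urb_sg_linearize().
 */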
static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
                           gfp_t mem_flags)
{
        size_t remaining;
        struct scatterlist *sg;
        int i;
        int ntds = 0;
        struct whc_std *std = NULL;
        struct whc_page_list_entry *new_pl_virt;
        dma_addr_t prev_end = 0;
        size_t pl_len;
        int p = 0;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_sgs, i) {
                dma_addr_t dma_addr;
                size_t dma_remaining;
                dma_addr_t sp, ep;
                int num_pointers;

                if (remaining == 0)
                        break;

                dma_addr = sg_dma_address(sg);
                dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

                while (dma_remaining) {
                        size_t dma_len;

                        /*
                         * We can use the previous std (if it exists) provided that:
                         * - the previous one ended on a page boundary.
                         * - the current one begins on a page boundary.
                         * - the previous one isn't full.
                         *
                         * If a new std is needed but the previous one
                         * was not a whole number of packets then this
                         * sg list cannot be mapped onto multiple
                         * qTDs.  Return an error and let the caller
                         * sort it out.
                         */
                        if (!std
                            || (prev_end & (WHCI_PAGE_SIZE-1))
                            || (dma_addr & (WHCI_PAGE_SIZE-1))
                            || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
                                if (std && std->len % qset->max_packet != 0)
                                        return -EINVAL;
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                ntds++;
                                p = 0;
                        }

                        dma_len = dma_remaining;

                        /*
                         * If the remainder of this element doesn't
                         * fit in a single qTD, limit the qTD to a
                         * whole number of packets.  This allows the
                         * remainder to go into the next qTD.
                         */
                        if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
                                dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
                                        * qset->max_packet - std->len;
                        }

                        std->len += dma_len;
                        std->ntds_remaining = -1; /* filled in later */

                        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
                        ep = dma_addr + dma_len;
                        num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
                        std->num_pointers += num_pointers;

                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

                        /*
                         * Use a temporary so the existing page list
                         * isn't leaked if krealloc() fails; it will
                         * be freed by qset_free_stds().
                         */
                        new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
                        if (new_pl_virt == NULL)
                                return -ENOMEM;
                        std->pl_virt = new_pl_virt;

                        for (; p < std->num_pointers; p++) {
                                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
                        }

                        prev_end = dma_addr = ep;
                        dma_remaining -= dma_len;
                        remaining -= dma_len;
                }
        }

        /* Now the number of stds is known, go back and fill in
           std->ntds_remaining. */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining == -1) {
                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
                        std->ntds_remaining = ntds--;
                        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
                                                       pl_len, DMA_TO_DEVICE);
                        /* Check the mapping; the caller frees the stds on error. */
                        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
                                std->dma_addr = 0;
                                return -ENOMEM;
                        }
                }
        }
        return 0;
}

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
                                     struct urb *urb, gfp_t mem_flags)
{
        bool is_out = usb_pipeout(urb->pipe);
        size_t max_std_len;
        size_t remaining;
        int ntds = 0;
        struct whc_std *std = NULL;
        void *bounce = NULL;
        struct scatterlist *sg;
        int i;

        /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
        max_std_len = qset->max_burst * qset->max_packet;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_sgs, i) {
                size_t len;
                size_t sg_remaining;
                void *orig;

                if (remaining == 0)
                        break;

                sg_remaining = min_t(size_t, remaining, sg->length);
                orig = sg_virt(sg);

                while (sg_remaining) {
                        if (!std || std->len == max_std_len) {
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                std->bounce_buf = kmalloc(max_std_len, mem_flags);
                                if (std->bounce_buf == NULL)
                                        return -ENOMEM;
                                std->bounce_sg = sg;
                                std->bounce_offset = orig - sg_virt(sg);
                                bounce = std->bounce_buf;
                                ntds++;
                        }

                        len = min(sg_remaining, max_std_len - std->len);

                        if (is_out)
                                memcpy(bounce, orig, len);

                        std->len += len;
                        std->ntds_remaining = -1; /* filled in later */

                        bounce += len;
                        orig += len;
                        sg_remaining -= len;
                        remaining -= len;
                }
        }

        /*
         * For each of the new sTDs, map the bounce buffers, create
         * page lists (if necessary), and fill in std->ntds_remaining.
         */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining != -1)
                        continue;

                std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
                                               is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        return -ENOMEM;

                std->ntds_remaining = ntds--;
        }

        return 0;
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
        gfp_t mem_flags)
{
        struct whc_urb *wurb;
        int remaining = urb->transfer_buffer_length;
        u64 transfer_dma = urb->transfer_dma;
        int ntds_remaining;
        int ret;

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
        if (wurb == NULL)
                goto err_no_mem;
        urb->hcpriv = wurb;
        wurb->qset = qset;
        wurb->urb = urb;
        INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

        if (urb->num_sgs) {
                ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
                if (ret == -EINVAL) {
                        qset_free_stds(qset, urb);
                        ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
                }
                if (ret < 0)
                        goto err_no_mem;
                return 0;
        }

        ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
        if (ntds_remaining == 0)
                ntds_remaining = 1;

        while (ntds_remaining) {
                struct whc_std *std;
                size_t std_len;

                std_len = remaining;
                if (std_len > QTD_MAX_XFER_SIZE)
                        std_len = QTD_MAX_XFER_SIZE;

                std = qset_new_std(whc, qset, urb, mem_flags);
                if (std == NULL)
                        goto err_no_mem;

                std->dma_addr = transfer_dma;
                std->len = std_len;
                std->ntds_remaining = ntds_remaining;

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        goto err_no_mem;

                ntds_remaining--;
                remaining -= std_len;
                transfer_dma += std_len;
        }

        return 0;

err_no_mem:
        qset_free_stds(qset, urb);
        return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
                            struct urb *urb, int status)
{
        struct wusbhc *wusbhc = &whc->wusbhc;
        struct whc_urb *wurb = urb->hcpriv;

        usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
        /* Drop the lock as urb->complete() may enqueue another urb. */
        spin_unlock(&whc->lock);
        wusbhc_giveback_urb(wusbhc, urb, status);
        spin_lock(&whc->lock);

        kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
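        /*
         * A halted endpoint: pick an errno based on why the qTD
         * halted (data buffer error, babble or retries exceeded).
         */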
        if (status & QTD_STS_HALTED) {
                if (status & QTD_STS_DBE)
                        return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
                else if (status & QTD_STS_BABBLE)
                        return -EOVERFLOW;
                else if (status & QTD_STS_RCE)
                        return -ETIME;
                return -EPIPE;
        }
        if (usb_pipein(urb->pipe)
            && (urb->transfer_flags & URB_SHORT_NOT_OK)
            && urb->actual_length < urb->transfer_buffer_length)
                return -EREMOTEIO;
        return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.
 * If the urb is completely transferred, or (for an IN transfer) the
 * last packet flag (LPF) is set, then the transfer is complete and
 * the urb should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
                                 struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        uint32_t status;
        bool complete;

        status = le32_to_cpu(qtd->status);

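        /* The qTD's length field now holds the bytes not transferred. */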
        urb->actual_length += std->len - QTD_STS_TO_LEN(status);

        if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
                complete = true;
        else
                complete = whc_std_last(std);

        qset_remove_qtd(whc, qset);
        qset_free_std(whc, std);

        /*
         * Transfers for this URB are complete?  Then return it to the
         * USB subsystem.
         */
        if (complete) {
                qset_remove_qtds(whc, qset, urb);
                qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

                /*
                 * If iAlt isn't valid then the hardware didn't
                 * advance iCur. Adjust the start and end pointers to
                 * match iCur.
                 */
                if (!(status & QTD_STS_IALT_VALID))
                        qset->td_start = qset->td_end
                                = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
                qset->pause_after_urb = NULL;
        }
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
                               struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        int urb_status;

        urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

        qset_remove_qtds(whc, qset, urb);
        qset_remove_urb(whc, qset, urb, urb_status);

        list_for_each_entry(std, &qset->stds, list_node) {
                if (qset->ntds == 0)
                        break;
                qset_remove_qtd(whc, qset);
                std->qtd = NULL;
        }

        qset->remove = 1;
}

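/**
 * qset_free - return a qset to the host controller's DMA pool.
 */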
void qset_free(struct whc *whc, struct whc_qset *qset)
{
        dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);
        qset_free(whc, qset);
}