linux/drivers/usb/host/whci/qset.c
/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
        struct whc_qset *qset;
        dma_addr_t dma;

        qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
        if (qset == NULL)
                return NULL;

        qset->qset_dma = dma;
        qset->whc = whc;

        INIT_LIST_HEAD(&qset->list_node);
        INIT_LIST_HEAD(&qset->stds);

        return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @whc:  the WHCI host controller
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
        struct usb_device *usb_dev = urb->dev;
        struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
        bool is_out;
        uint8_t phy_rate;

        is_out = usb_pipeout(urb->pipe);

        qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

        epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
        if (epcd) {
                qset->max_seq = epcd->bMaxSequence;
                qset->max_burst = epcd->bMaxBurst;
        } else {
                qset->max_seq = 2;
                qset->max_burst = 1;
        }

        /*
         * Initial PHY rate is 53.3 Mbit/s for control endpoints or
         * the maximum supported by the device for other endpoints
         * (unless limited by the user).
         */
        if (usb_pipecontrol(urb->pipe))
                phy_rate = UWB_PHY_RATE_53;
        else {
                uint16_t phy_rates;

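                /*
                 * wPHYRates is a bitmap with bit N set if the device
                 * supports PHY rate index N, so fls() - 1 picks the
                 * fastest rate the device supports; it is then
                 * clamped to the host's configured maximum below.
                 */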
                phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
                phy_rate = fls(phy_rates) - 1;
                if (phy_rate > whc->wusbhc.phy_rate)
                        phy_rate = whc->wusbhc.phy_rate;
        }

        qset->qh.info1 = cpu_to_le32(
                QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
                | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
                | usb_pipe_to_qh_type(urb->pipe)
                | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
                | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
                );
        qset->qh.info2 = cpu_to_le32(
                QH_INFO2_BURST(qset->max_burst)
                | QH_INFO2_DBP(0)
                | QH_INFO2_MAX_COUNT(3)
                | QH_INFO2_MAX_RETRY(3)
                | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
                );
        /* FIXME: where can we obtain these Tx parameters from?  Why
         * doesn't the chip know what Tx power to use? It knows the Rx
         * strength and can presumably guess the Tx power required
         * from that? */
        qset->qh.info3 = cpu_to_le32(
                QH_INFO3_TX_RATE(phy_rate)
                | QH_INFO3_TX_PWR(0) /* 0 == max power */
                );

        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
        qset->td_start = qset->td_end = qset->ntds = 0;

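        /*
         * Reset the link to an empty, terminating link (QH_LINK_T
         * marks the end of a list; QH_LINK_NTDS(8) encodes the
         * number of qTDs in this qset).
         */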
        qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
        qset->qh.scratch[1] = 0;
        qset->qh.scratch[2] = 0;

        memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

        init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
        qset->reset = 0;

        qset->qh.status &= ~QH_STATUS_SEQ_MASK;
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
                                 gfp_t mem_flags)
{
        struct whc_qset *qset;

        qset = urb->ep->hcpriv;
        if (qset == NULL) {
                qset = qset_alloc(whc, mem_flags);
                if (qset == NULL)
                        return NULL;

                qset->ep = urb->ep;
                urb->ep->hcpriv = qset;
                qset_fill_qh(whc, qset, urb);
        }
        return qset;
}

void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
        qset->remove = 0;
        list_del_init(&qset->list_node);
        complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
        struct whc_std *std;
        enum whc_update update = 0;

        list_for_each_entry(std, &qset->stds, list_node) {
                struct whc_qtd *qtd;
                uint32_t status;

                if (qset->ntds >= WHCI_QSET_TD_MAX
                    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
                        break;

                if (std->qtd)
                        continue; /* already has a qTD */

                qtd = std->qtd = &qset->qtd[qset->td_end];

                /* Fill in setup bytes for control transfers. */
                if (usb_pipecontrol(std->urb->pipe))
                        memcpy(qtd->setup, std->urb->setup_packet, 8);

                status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

                if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
                        status |= QTD_STS_LAST_PKT;

                /*
                 * For an IN transfer the iAlt field should be set so
                 * the h/w will automatically advance to the next
                 * transfer. However, if there are 8 or more TDs
                 * remaining in this transfer then iAlt cannot be set
                 * as it could point to somewhere in this transfer.
                 */
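                /*
                 * E.g. with WHCI_QSET_TD_MAX == 8, td_end == 6 and
                 * three TDs left in this transfer, iAlt = (6 + 3) % 8
                 * = 1: the first slot after this transfer's last qTD.
                 */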
                if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
                        int ialt;
                        ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
                        status |= QTD_STS_IALT(ialt);
                } else if (usb_pipein(std->urb->pipe))
                        qset->pause_after_urb = std->urb;

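                /*
                 * A "small" qTD has no page list: page_list_ptr then
                 * points directly at the (at most one page) buffer.
                 */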
                if (std->num_pointers)
                        qtd->options = cpu_to_le32(QTD_OPT_IOC);
                else
                        qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
                qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

                qtd->status = cpu_to_le32(status);

                if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
                        update = WHC_UPDATE_UPDATED;

                if (++qset->td_end >= WHCI_QSET_TD_MAX)
                        qset->td_end = 0;
                qset->ntds++;
        }

        return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
        qset->qtd[qset->td_start].status = 0;

        if (++qset->td_start >= WHCI_QSET_TD_MAX)
                qset->td_start = 0;
        qset->ntds--;
}

static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
        struct scatterlist *sg;
        void *bounce;
        size_t remaining, offset;

        bounce = std->bounce_buf;
        remaining = std->len;

        sg = std->bounce_sg;
        offset = std->bounce_offset;

        while (remaining) {
                size_t len;

                len = min(sg->length - offset, remaining);
                memcpy(sg_virt(sg) + offset, bounce, len);

                bounce += len;
                remaining -= len;

                offset += len;
                if (offset >= sg->length) {
                        sg = sg_next(sg);
                        offset = 0;
                }
        }
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
        list_del(&std->list_node);
        if (std->bounce_buf) {
                bool is_out = usb_pipeout(std->urb->pipe);
                dma_addr_t dma_addr;

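                /*
                 * If this sTD has a page list, std->dma_addr is the
                 * mapping of the page list itself; the bounce
                 * buffer's own DMA address was stored in the first
                 * page list entry by qset_fill_page_list().
                 */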
                if (std->num_pointers)
                        dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
                else
                        dma_addr = std->dma_addr;

                dma_unmap_single(whc->wusbhc.dev, dma_addr,
                                 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (!is_out)
                        qset_copy_bounce_to_sg(whc, std);
                kfree(std->bounce_buf);
        }
        if (std->pl_virt) {
                if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
                        dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
                                         std->num_pointers * sizeof(struct whc_page_list_entry),
                                         DMA_TO_DEVICE);
                kfree(std->pl_virt);
                std->pl_virt = NULL;
        }
        kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
                             struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb != urb)
                        break;
                if (std->qtd != NULL)
                        qset_remove_qtd(whc, qset);
                qset_free_std(whc, std);
        }
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb == urb)
                        qset_free_std(qset->whc, std);
        }
}

static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
        dma_addr_t dma_addr = std->dma_addr;
        dma_addr_t sp, ep;
        size_t pl_len;
        int p;

        /* Short buffers don't need a page list. */
        if (std->len <= WHCI_PAGE_SIZE) {
                std->num_pointers = 0;
                return 0;
        }

        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
        ep = dma_addr + std->len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
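        /*
         * E.g. with 4 KiB WHCI pages, a 6000 byte buffer that starts
         * 512 bytes into a page covers ep - sp == 6512 bytes of page
         * address space and so needs two page list entries.
         */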

        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
        std->pl_virt = kmalloc(pl_len, mem_flags);
        if (std->pl_virt == NULL)
                return -ENOMEM;
        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
                kfree(std->pl_virt);
                std->pl_virt = NULL; /* avoid a double free in qset_free_std() */
                return -EFAULT;
        }

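        /*
         * The first entry holds the buffer's exact (possibly
         * unaligned) start address; the remaining entries point at
         * the page boundaries that follow it.
         */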
        for (p = 0; p < std->num_pointers; p++) {
                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
        }

        return 0;
}

/**
 * urb_dequeue_work - execute the ASL/PZL update and give the urb back to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
        struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
        struct whc_qset *qset = wurb->qset;
        struct whc *whc = qset->whc;
        unsigned long flags;

        if (wurb->is_async)
                asl_update(whc, WUSBCMD_ASYNC_UPDATED
                           | WUSBCMD_ASYNC_SYNCED_DB
                           | WUSBCMD_ASYNC_QSET_RM);
        else
                pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
                           | WUSBCMD_PERIODIC_SYNCED_DB
                           | WUSBCMD_PERIODIC_QSET_RM);

        spin_lock_irqsave(&whc->lock, flags);
        qset_remove_urb(whc, qset, wurb->urb, wurb->status);
        spin_unlock_irqrestore(&whc->lock, flags);
}

static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
                                    struct urb *urb, gfp_t mem_flags)
{
        struct whc_std *std;

        std = kzalloc(sizeof(struct whc_std), mem_flags);
        if (std == NULL)
                return NULL;

        std->urb = urb;
        std->qtd = NULL;

        INIT_LIST_HEAD(&std->list_node);
        list_add_tail(&std->list_node, &qset->stds);

        return std;
}

static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
                           gfp_t mem_flags)
{
        size_t remaining;
        struct scatterlist *sg;
        int i;
        int ntds = 0;
        struct whc_std *std = NULL;
        struct whc_page_list_entry *new_pl_virt;
        dma_addr_t prev_end = 0;
        size_t pl_len;
        int p = 0;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                dma_addr_t dma_addr;
                size_t dma_remaining;
                dma_addr_t sp, ep;
                int num_pointers;

                if (remaining == 0)
                        break;

                dma_addr = sg_dma_address(sg);
                dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

                while (dma_remaining) {
                        size_t dma_len;

                        /*
                         * We can use the previous std (if it exists) provided that:
                         * - the previous one ended on a page boundary.
                         * - the current one begins on a page boundary.
                         * - the previous one isn't full.
                         *
                         * If a new std is needed but the previous one
                         * was not a whole number of packets then this
                         * sg list cannot be mapped onto multiple
                         * qTDs.  Return an error and let the caller
                         * sort it out.
                         */
                        if (!std
                            || (prev_end & (WHCI_PAGE_SIZE-1))
                            || (dma_addr & (WHCI_PAGE_SIZE-1))
                            || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
                                if (std && std->len % qset->max_packet != 0)
                                        return -EINVAL;
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                ntds++;
                                p = 0;
                        }

                        dma_len = dma_remaining;

                        /*
                         * If the remainder of this element doesn't
                         * fit in a single qTD, limit the qTD to a
                         * whole number of packets.  This allows the
                         * remainder to go into the next qTD.
                         */
                        if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
                                dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
                                        * qset->max_packet - std->len;
                        }
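                        /*
                         * E.g. with max_packet == 512 and a 20 bit
                         * qTD length field (QTD_MAX_XFER_SIZE ==
                         * 0xfffff), this caps the qTD at 2047 whole
                         * packets: 1048064 bytes in total.
                         */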

                        std->len += dma_len;
                        std->ntds_remaining = -1; /* filled in later */

                        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
                        ep = dma_addr + dma_len;
                        num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
                        std->num_pointers += num_pointers;

                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

                        new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
                        if (new_pl_virt == NULL) {
                                kfree(std->pl_virt);
                                std->pl_virt = NULL;
                                return -ENOMEM;
                        }
                        std->pl_virt = new_pl_virt;

                        for (; p < std->num_pointers; p++) {
                                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
                        }

                        prev_end = dma_addr = ep;
                        dma_remaining -= dma_len;
                        remaining -= dma_len;
                }
        }

        /*
         * Now the number of sTDs is known, go back and fill in
         * std->ntds_remaining.
         */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining == -1) {
                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
                        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
                                                       pl_len, DMA_TO_DEVICE);
                        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
                                return -EFAULT;
                        std->ntds_remaining = ntds--;
                }
        }
        return 0;
}

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
                                     struct urb *urb, gfp_t mem_flags)
{
        bool is_out = usb_pipeout(urb->pipe);
        size_t max_std_len;
        size_t remaining;
        int ntds = 0;
        struct whc_std *std = NULL;
        void *bounce = NULL;
        struct scatterlist *sg;
        int i;

        /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
        max_std_len = qset->max_burst * qset->max_packet;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                size_t len;
                size_t sg_remaining;
                void *orig;

                if (remaining == 0)
                        break;

                sg_remaining = min_t(size_t, remaining, sg->length);
                orig = sg_virt(sg);

                while (sg_remaining) {
                        if (!std || std->len == max_std_len) {
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                std->bounce_buf = kmalloc(max_std_len, mem_flags);
                                if (std->bounce_buf == NULL)
                                        return -ENOMEM;
                                std->bounce_sg = sg;
                                std->bounce_offset = orig - sg_virt(sg);
                                bounce = std->bounce_buf;
                                ntds++;
                        }

                        len = min(sg_remaining, max_std_len - std->len);

                        if (is_out)
                                memcpy(bounce, orig, len);

                        std->len += len;
                        std->ntds_remaining = -1; /* filled in later */

                        bounce += len;
                        orig += len;
                        sg_remaining -= len;
                        remaining -= len;
                }
        }

        /*
         * For each of the new sTDs, map the bounce buffers, create
         * page lists (if necessary), and fill in std->ntds_remaining.
         */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining != -1)
                        continue;

                std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
                                               is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
                        return -EFAULT;

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        return -ENOMEM;

                std->ntds_remaining = ntds--;
        }

        return 0;
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
        gfp_t mem_flags)
{
        struct whc_urb *wurb;
        int remaining = urb->transfer_buffer_length;
        u64 transfer_dma = urb->transfer_dma;
        int ntds_remaining;
        int ret;

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
        if (wurb == NULL)
                goto err_no_mem;
        urb->hcpriv = wurb;
        wurb->qset = qset;
        wurb->urb = urb;
        INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

        if (urb->num_sgs) {
                ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
                if (ret == -EINVAL) {
                        qset_free_stds(qset, urb);
                        ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
                }
                if (ret < 0)
                        goto err_no_mem;
                return 0;
        }

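        /*
         * E.g. a 2 MiB transfer needs three sTDs, assuming
         * QTD_MAX_XFER_SIZE is 0xfffff (the qTD's 20 bit length
         * field); a zero length transfer still gets one.
         */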
        ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
        if (ntds_remaining == 0)
                ntds_remaining = 1;

        while (ntds_remaining) {
                struct whc_std *std;
                size_t std_len;

                std_len = remaining;
                if (std_len > QTD_MAX_XFER_SIZE)
                        std_len = QTD_MAX_XFER_SIZE;

                std = qset_new_std(whc, qset, urb, mem_flags);
                if (std == NULL)
                        goto err_no_mem;

                std->dma_addr = transfer_dma;
                std->len = std_len;
                std->ntds_remaining = ntds_remaining;

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        goto err_no_mem;

                ntds_remaining--;
                remaining -= std_len;
                transfer_dma += std_len;
        }

        return 0;

err_no_mem:
        qset_free_stds(qset, urb);
        return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
                            struct urb *urb, int status)
{
        struct wusbhc *wusbhc = &whc->wusbhc;
        struct whc_urb *wurb = urb->hcpriv;

        usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
        /* Drop the lock as urb->complete() may enqueue another urb. */
        spin_unlock(&whc->lock);
        wusbhc_giveback_urb(wusbhc, urb, status);
        spin_lock(&whc->lock);

        kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
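        /*
         * A halted qTD indicates a hard error: a data buffer error
         * (buffer over/underrun), babble, or too many retries (RCE).
         * Anything else reported with the halt bit set is treated as
         * a stall.
         */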
        if (status & QTD_STS_HALTED) {
                if (status & QTD_STS_DBE)
                        return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
                else if (status & QTD_STS_BABBLE)
                        return -EOVERFLOW;
                else if (status & QTD_STS_RCE)
                        return -ETIME;
                return -EPIPE;
        }
        if (usb_pipein(urb->pipe)
            && (urb->transfer_flags & URB_SHORT_NOT_OK)
            && urb->actual_length < urb->transfer_buffer_length)
                return -EREMOTEIO;
        return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb is completely transferred, or (for an IN transfer) the last
 * packet flag (LPF) is set, the transfer is complete and the urb
 * should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
                                 struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        uint32_t status;
        bool complete;

        status = le32_to_cpu(qtd->status);

        urb->actual_length += std->len - QTD_STS_TO_LEN(status);

        if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
                complete = true;
        else
                complete = whc_std_last(std);

        qset_remove_qtd(whc, qset);
        qset_free_std(whc, std);

        /*
         * If all transfers for this URB are complete, return it to
         * the USB subsystem.
         */
        if (complete) {
                qset_remove_qtds(whc, qset, urb);
                qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

                /*
                 * If iAlt isn't valid then the hardware didn't
                 * advance iCur. Adjust the start and end pointers to
                 * match iCur.
                 */
                if (!(status & QTD_STS_IALT_VALID))
                        qset->td_start = qset->td_end
                                = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
                qset->pause_after_urb = NULL;
        }
}

/**
 * process_halted_qtd - process a qset with a halted qTD
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
                               struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        int urb_status;

        urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

        qset_remove_qtds(whc, qset, urb);
        qset_remove_urb(whc, qset, urb, urb_status);

        list_for_each_entry(std, &qset->stds, list_node) {
                if (qset->ntds == 0)
                        break;
                qset_remove_qtd(whc, qset);
                std->qtd = NULL;
        }

        qset->remove = 1;
}

void qset_free(struct whc *whc, struct whc_qset *qset)
{
        dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);
        qset_free(whc, qset);
}