linux/drivers/usb/host/xhci-mem.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        struct xhci_segment *seg;
        dma_addr_t      dma;

        seg = kzalloc(sizeof(*seg), flags);
        if (!seg)
                return NULL;
        xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

        seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
        if (!seg->trbs) {
                kfree(seg);
                return NULL;
        }
        xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
                        seg->trbs, (unsigned long long)dma);

        memset(seg->trbs, 0, SEGMENT_SIZE);
        seg->dma = dma;
        seg->next = NULL;

        return seg;
}

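/* Free the segment's TRB block back to the ring pool, then the private struct. */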
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
        if (!seg)
                return;
        if (seg->trbs) {
                xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
                                seg->trbs, (unsigned long long)seg->dma);
                dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
                seg->trbs = NULL;
        }
        xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
        kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                struct xhci_segment *next, bool link_trbs)
{
        u32 val;

        if (!prev || !next)
                return;
        prev->next = next;
        if (link_trbs) {
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
                val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
                val &= ~TRB_TYPE_BITMASK;
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
                if (xhci_link_trb_quirk(xhci))
                        val |= TRB_CHAIN;
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
        }
        xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
                        (unsigned long long)prev->dma,
                        (unsigned long long)next->dma);
}

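/*
 * Free every segment of a ring.  The walk starts at the segment after
 * first_seg so the loop over the circular segment list terminates, and
 * first_seg itself is freed last.
 */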
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_segment *seg;
        struct xhci_segment *first_seg;

        if (!ring || !ring->first_seg)
                return;
        first_seg = ring->first_seg;
        seg = first_seg->next;
        xhci_dbg(xhci, "Freeing ring at %p\n", ring);
        while (seg != first_seg) {
                struct xhci_segment *next = seg->next;
                xhci_segment_free(xhci, seg);
                seg = next;
        }
        xhci_segment_free(xhci, first_seg);
        ring->first_seg = NULL;
        kfree(ring);
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                unsigned int num_segs, bool link_trbs, gfp_t flags)
{
        struct xhci_ring        *ring;
        struct xhci_segment     *prev;

        ring = kzalloc(sizeof(*ring), flags);
        xhci_dbg(xhci, "Allocating ring at %p\n", ring);
        if (!ring)
                return NULL;

        INIT_LIST_HEAD(&ring->td_list);
        if (num_segs == 0)
                return ring;

        ring->first_seg = xhci_segment_alloc(xhci, flags);
        if (!ring->first_seg)
                goto fail;
        num_segs--;

        prev = ring->first_seg;
        while (num_segs > 0) {
                struct xhci_segment     *next;

                next = xhci_segment_alloc(xhci, flags);
                if (!next)
                        goto fail;
                xhci_link_segments(xhci, prev, next, link_trbs);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

        if (link_trbs) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
                xhci_dbg(xhci, "Wrote link toggle flag to"
                                " segment %p (virtual), 0x%llx (DMA)\n",
                                prev, (unsigned long long)prev->dma);
        }
        /* The ring is empty, so the enqueue pointer == dequeue pointer */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;
        /* The ring is initialized to 0.  The producer must write 1 to the
         * cycle bit to hand ownership of a TRB to the consumer, so PCS = 1.
         * The consumer compares CCS to the cycle bit to check ownership,
         * so CCS = 1.
         */
        ring->cycle_state = 1;

        return ring;

fail:
        xhci_ring_free(xhci, ring);
        return NULL;
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

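/*
 * Allocate a device or input context from the device context pool.  The
 * container is sized for the worst case of a slot context plus 31 endpoint
 * contexts (1024 or 2048 bytes for 32- or 64-byte context entries), and an
 * input context carries one extra entry for the input control context.
 */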
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
                                                    int type, gfp_t flags)
{
        struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
        if (!ctx)
                return NULL;

        BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
        ctx->type = type;
        ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
        if (type == XHCI_CTX_TYPE_INPUT)
                ctx->size += CTX_SIZE(xhci->hcc_params);

        ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
        if (!ctx->bytes) {
                kfree(ctx);
                return NULL;
        }
        memset(ctx->bytes, 0, ctx->size);
        return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
                             struct xhci_container_ctx *ctx)
{
        dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
        kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
                                              struct xhci_container_ctx *ctx)
{
        BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
        return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
                                        struct xhci_container_ctx *ctx)
{
        if (ctx->type == XHCI_CTX_TYPE_DEVICE)
                return (struct xhci_slot_ctx *)ctx->bytes;

        return (struct xhci_slot_ctx *)
                (ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

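/*
 * Endpoint contexts are laid out one after another behind the slot context
 * (and, in an input context, behind the input control context as well).
 * For example, with 32-byte contexts, endpoint 0's context in an input
 * context lives at ctx->bytes + 2 * 32: one entry for the input control
 * context and one for the slot context.
 */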
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
                                    struct xhci_container_ctx *ctx,
                                    unsigned int ep_index)
{
        /* increment ep index by offset of start of ep ctx array */
        ep_index++;
        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                ep_index++;

        return (struct xhci_ep_ctx *)
                (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}

/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || !xhci->devs[slot_id])
                return;

        dev = xhci->devs[slot_id];
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

        for (i = 0; i < 31; ++i)
                if (dev->eps[i].ring)
                        xhci_ring_free(xhci, dev->eps[i].ring);

        if (dev->in_ctx)
                xhci_free_container_ctx(xhci, dev->in_ctx);
        if (dev->out_ctx)
                xhci_free_container_ctx(xhci, dev->out_ctx);

        kfree(xhci->devs[slot_id]);
        xhci->devs[slot_id] = NULL;
}

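/*
 * Allocate the per-slot state for a newly enabled device slot: the output
 * (hardware-owned) and input device contexts, the endpoint 0 ring, and the
 * slot's entry in the device context base address array.  Returns 1 on
 * success and 0 on failure (not a -errno value).
 */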
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || xhci->devs[slot_id]) {
                xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
                return 0;
        }

        xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
        if (!xhci->devs[slot_id])
                return 0;
        dev = xhci->devs[slot_id];

        /* Allocate the (output) device context that will be used in the HC. */
        dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dev->out_ctx)
                goto fail;

        xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->out_ctx->dma);

        /* Allocate the (input) device context for the address device command */
        dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
        if (!dev->in_ctx)
                goto fail;

        xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->in_ctx->dma);

        /* Initialize the cancellation list for each endpoint */
        for (i = 0; i < 31; i++)
                INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);

        /* Allocate endpoint 0 ring */
        dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
        if (!dev->eps[0].ring)
                goto fail;

        init_completion(&dev->cmd_completion);
        INIT_LIST_HEAD(&dev->cmd_list);

        /* Point to output device context in dcbaa. */
        xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
        xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
                        slot_id,
                        &xhci->dcbaa->dev_context_ptrs[slot_id],
                        (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

        return 1;
fail:
        xhci_free_virt_device(xhci, slot_id);
        return 0;
}

/* Set up an xHCI virtual device for an Address Device command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx      *ep0_ctx;
        struct usb_device       *top_dev;
        struct xhci_slot_ctx    *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;

        dev = xhci->devs[udev->slot_id];
        /* Slot ID 0 is reserved */
        if (udev->slot_id == 0 || !dev) {
                xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
                                udev->slot_id);
                return -EINVAL;
        }
        ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
        ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

        /* 2) New slot context and endpoint 0 context are valid */
        ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

        /* 3) Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= LAST_CTX(1);

        slot_ctx->dev_info |= (u32) udev->route;
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
                break;
        case USB_SPEED_HIGH:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
                break;
        case USB_SPEED_FULL:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
                break;
        case USB_SPEED_LOW:
                slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
        default:
                /* Speed was set earlier, this shouldn't happen. */
                BUG();
        }
        /* Find the root hub port this device is under */
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
        slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
        xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

        /* Is this a LS/FS device under a HS hub? */
        if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
                        udev->tt) {
                slot_ctx->tt_info = udev->tt->hub->slot_id;
                slot_ctx->tt_info |= udev->ttport << 8;
                if (udev->tt->multi)
                        slot_ctx->dev_info |= DEV_MTT;
        }
        xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
        xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

        /* Step 4 - ring already allocated */
        /* Step 5 */
        ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
        /*
         * XXX: Not sure about wireless USB devices.
         */
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                ep0_ctx->ep_info2 |= MAX_PACKET(512);
                break;
        case USB_SPEED_HIGH:
        /* USB core guesses at a 64-byte max packet first for FS devices */
        case USB_SPEED_FULL:
                ep0_ctx->ep_info2 |= MAX_PACKET(64);
                break;
        case USB_SPEED_LOW:
                ep0_ctx->ep_info2 |= MAX_PACKET(8);
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
        default:
                /* New speed? */
                BUG();
        }
        /* EP 0 can handle "burst" sizes of 1, so the Max Burst Size field is 0 */
        ep0_ctx->ep_info2 |= MAX_BURST(0);
        ep0_ctx->ep_info2 |= ERROR_COUNT(3);

        ep0_ctx->deq =
                dev->eps[0].ring->first_seg->dma;
        ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */

        return 0;
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(N)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if the
 * interval is set to 0.
 */
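/*
 * Worked example: a full-speed interrupt endpoint with bInterval = 32
 * frames becomes fls(8 * 32) - 1 = 8, so the HC services it every
 * 2^8 * 125us = 32ms, which matches the requested rate exactly and
 * triggers no rounding warning.
 */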
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        unsigned int interval = 0;

        switch (udev->speed) {
        case USB_SPEED_HIGH:
                /* Max NAK rate */
                if (usb_endpoint_xfer_control(&ep->desc) ||
                                usb_endpoint_xfer_bulk(&ep->desc))
                        interval = ep->desc.bInterval;
                /* Fall through - SS and HS isoc/int have same decoding */
        case USB_SPEED_SUPER:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        if (ep->desc.bInterval == 0)
                                interval = 0;
                        else
                                interval = ep->desc.bInterval - 1;
                        if (interval > 15)
                                interval = 15;
                        /* Warn only if clamping actually changed the interval */
                        if (interval != ep->desc.bInterval - 1)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        /* Convert bInterval (in 1-255 frames) to microframes and round down
         * to the nearest power of 2.
         */
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        interval = fls(8*ep->desc.bInterval) - 1;
                        if (interval > 10)
                                interval = 10;
                        if (interval < 3)
                                interval = 3;
                        if ((1 << interval) != 8*ep->desc.bInterval)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        default:
                BUG();
        }
        return EP_INTERVAL(interval);
}

static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        int in;
        u32 type;

        in = usb_endpoint_dir_in(&ep->desc);
        if (usb_endpoint_xfer_control(&ep->desc)) {
                type = EP_TYPE(CTRL_EP);
        } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
                if (in)
                        type = EP_TYPE(BULK_IN_EP);
                else
                        type = EP_TYPE(BULK_OUT_EP);
        } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
                if (in)
                        type = EP_TYPE(ISOC_IN_EP);
                else
                        type = EP_TYPE(ISOC_OUT_EP);
        } else if (usb_endpoint_xfer_int(&ep->desc)) {
                if (in)
                        type = EP_TYPE(INT_IN_EP);
                else
                        type = EP_TYPE(INT_OUT_EP);
        } else {
                BUG();
        }
        return type;
}

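/*
 * Fill in the input endpoint context for one endpoint of a device and
 * allocate its transfer ring.  The new ring is parked in
 * eps[ep_index].new_ring; it only replaces the old ring once the configure
 * endpoint command that installs this context succeeds.
 */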
int xhci_endpoint_init(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_device *udev,
                struct usb_host_endpoint *ep,
                gfp_t mem_flags)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_ring *ep_ring;
        unsigned int max_packet;
        unsigned int max_burst;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        /* Set up the endpoint ring */
        virt_dev->eps[ep_index].new_ring =
                xhci_ring_alloc(xhci, 1, true, mem_flags);
        if (!virt_dev->eps[ep_index].new_ring)
                return -ENOMEM;
        ep_ring = virt_dev->eps[ep_index].new_ring;
        ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

        ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

        /* FIXME dig Mult and streams info out of ep companion desc */

        /* Allow 3 retries for everything but isoc;
         * error count = 0 means infinite retries.
         */
        if (!usb_endpoint_xfer_isoc(&ep->desc))
                ep_ctx->ep_info2 = ERROR_COUNT(3);
        else
                ep_ctx->ep_info2 = ERROR_COUNT(1);

        ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

        /* Set the max packet size and max burst */
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                max_packet = ep->desc.wMaxPacketSize;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                /* dig out max burst from ep companion desc */
                if (!ep->ss_ep_comp) {
                        xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
                        max_burst = 0;
                } else {
                        max_burst = ep->ss_ep_comp->desc.bMaxBurst;
                }
                ep_ctx->ep_info2 |= MAX_BURST(max_burst);
                break;
        case USB_SPEED_HIGH:
                /* bits 11:12 specify the number of additional transaction
                 * opportunities per microframe (USB 2.0, section 9.6.6)
                 */
                if (usb_endpoint_xfer_isoc(&ep->desc) ||
                                usb_endpoint_xfer_int(&ep->desc)) {
                        max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
                        ep_ctx->ep_info2 |= MAX_BURST(max_burst);
                }
                /* Fall through */
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                max_packet = ep->desc.wMaxPacketSize & 0x3ff;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                break;
        default:
                BUG();
        }
        /* FIXME Debug endpoint context */
        return 0;
}

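/* Zero an endpoint's entries in the input context so it can be reconfigured. */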
void xhci_endpoint_zero(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_host_endpoint *ep)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = 0;
        ep_ctx->deq = 0;
        ep_ctx->tx_info = 0;
        /* Don't free the endpoint ring until the set interface or configuration
         * request succeeds.
         */
}

/* Copy an output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx,
                unsigned int ep_index)
{
        struct xhci_ep_ctx *out_ep_ctx;
        struct xhci_ep_ctx *in_ep_ctx;

        out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

        in_ep_ctx->ep_info = out_ep_ctx->ep_info;
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy an output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot and then
 * issue a configure endpoint command.  Only the context entries field
 * matters, but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx)
{
        struct xhci_slot_ctx *in_slot_ctx;
        struct xhci_slot_ctx *out_slot_ctx;

        in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

        in_slot_ctx->dev_info = out_slot_ctx->dev_info;
        in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
        in_slot_ctx->tt_info = out_slot_ctx->tt_info;
        in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        int i;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

        if (!num_sp)
                return 0;

        xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
        if (!xhci->scratchpad)
                goto fail_sp;

        xhci->scratchpad->sp_array =
                pci_alloc_consistent(to_pci_dev(dev),
                                     num_sp * sizeof(u64),
                                     &xhci->scratchpad->sp_dma);
        if (!xhci->scratchpad->sp_array)
                goto fail_sp2;

        xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
        if (!xhci->scratchpad->sp_buffers)
                goto fail_sp3;

        xhci->scratchpad->sp_dma_buffers =
                kzalloc(sizeof(dma_addr_t) * num_sp, flags);
        if (!xhci->scratchpad->sp_dma_buffers)
                goto fail_sp4;

        xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
        for (i = 0; i < num_sp; i++) {
                dma_addr_t dma;
                void *buf = pci_alloc_consistent(to_pci_dev(dev),
                                                 xhci->page_size, &dma);
                if (!buf)
                        goto fail_sp5;

                xhci->scratchpad->sp_array[i] = dma;
                xhci->scratchpad->sp_buffers[i] = buf;
                xhci->scratchpad->sp_dma_buffers[i] = dma;
        }

        return 0;

 fail_sp5:
        for (i = i - 1; i >= 0; i--) {
                pci_free_consistent(to_pci_dev(dev), xhci->page_size,
                                    xhci->scratchpad->sp_buffers[i],
                                    xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
        kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
        pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
                            xhci->scratchpad->sp_array,
                            xhci->scratchpad->sp_dma);

 fail_sp2:
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;

 fail_sp:
        return -ENOMEM;
}

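/* Free the scratchpad buffers, the scratchpad pointer array, and the bookkeeping. */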
static void scratchpad_free(struct xhci_hcd *xhci)
{
        int num_sp;
        int i;
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (!xhci->scratchpad)
                return;

        num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        for (i = 0; i < num_sp; i++) {
                pci_free_consistent(pdev, xhci->page_size,
                                    xhci->scratchpad->sp_buffers[i],
                                    xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);
        kfree(xhci->scratchpad->sp_buffers);
        pci_free_consistent(pdev, num_sp * sizeof(u64),
                            xhci->scratchpad->sp_array,
                            xhci->scratchpad->sp_dma);
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;
}

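/*
 * Allocate a command structure along with an input context.  Typical use
 * (a sketch, not code from this file) looks something like:
 *
 *      cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
 *      if (!cmd)
 *              return -ENOMEM;
 *      ... queue the command TRB and ring the host controller doorbell ...
 *      wait_for_completion(cmd->completion);
 *      ... examine cmd->status ...
 *      xhci_free_command(xhci, cmd);
 */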
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
                bool allocate_completion, gfp_t mem_flags)
{
        struct xhci_command *command;

        command = kzalloc(sizeof(*command), mem_flags);
        if (!command)
                return NULL;

        command->in_ctx =
                xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
        if (!command->in_ctx) {
                kfree(command);
                return NULL;
        }

        if (allocate_completion) {
                command->completion =
                        kzalloc(sizeof(struct completion), mem_flags);
                if (!command->completion) {
                        xhci_free_container_ctx(xhci, command->in_ctx);
                        kfree(command);
                        return NULL;
                }
                init_completion(command->completion);
        }

        command->status = 0;
        INIT_LIST_HEAD(&command->cmd_list);
        return command;
}

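/* Free a command allocated by xhci_alloc_command(), including its completion. */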
void xhci_free_command(struct xhci_hcd *xhci,
                struct xhci_command *command)
{
        xhci_free_container_ctx(xhci, command->in_ctx);
        kfree(command->completion);
        kfree(command);
}

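/*
 * Free everything xhci_mem_init() allocated, clearing the host controller's
 * register pointers to each structure before the backing memory goes away.
 */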
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int size;
        int i;

        /* Free the Event Ring Segment Table and the actual Event Ring */
        if (xhci->ir_set) {
                xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
                xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
                xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
        }
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                pci_free_consistent(pdev, size,
                                xhci->erst.entries, xhci->erst.erst_dma_addr);
        xhci->erst.entries = NULL;
        xhci_dbg(xhci, "Freed ERST\n");
        if (xhci->event_ring)
                xhci_ring_free(xhci, xhci->event_ring);
        xhci->event_ring = NULL;
        xhci_dbg(xhci, "Freed event ring\n");

        xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
        xhci_dbg(xhci, "Freed command ring\n");

        for (i = 1; i < MAX_HC_SLOTS; ++i)
                xhci_free_virt_device(xhci, i);

        if (xhci->segment_pool)
                dma_pool_destroy(xhci->segment_pool);
        xhci->segment_pool = NULL;
        xhci_dbg(xhci, "Freed segment pool\n");

        if (xhci->device_pool)
                dma_pool_destroy(xhci->device_pool);
        xhci->device_pool = NULL;
        xhci_dbg(xhci, "Freed device context pool\n");

        xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
        if (xhci->dcbaa)
                pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;

        scratchpad_free(xhci);
        xhci->page_size = 0;
        xhci->page_shift = 0;
}

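/*
 * Allocate and wire up all host controller data structures: the device
 * context base address array, the segment and device context DMA pools,
 * the command ring, the event ring plus its segment table, and the
 * scratchpad buffers.  On any failure, everything allocated so far is
 * torn down via xhci_mem_cleanup().
 */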
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        dma_addr_t      dma;
        struct device   *dev = xhci_to_hcd(xhci)->self.controller;
        unsigned int    val, val2;
        u64             val_64;
        struct xhci_segment     *seg;
        u32 page_size;
        int i;

        page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
        xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
        for (i = 0; i < 16; i++) {
                if ((0x1 & page_size) != 0)
                        break;
                page_size = page_size >> 1;
        }
        if (i < 16)
                xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
        else
                xhci_warn(xhci, "WARN: no supported page size\n");
        /* Use 4K pages, since that's common and the minimum the HC supports */
        xhci->page_shift = 12;
        xhci->page_size = 1 << xhci->page_shift;
        xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

        /*
         * Program the Number of Device Slots Enabled field in the CONFIG
         * register with the max value of slots the HC can handle.
         */
        val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
        xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
                        (unsigned int) val);
        val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
        val |= (val2 & ~HCS_SLOTS_MASK);
        xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
                        (unsigned int) val);
        xhci_writel(xhci, val, &xhci->op_regs->config_reg);

        /*
         * Section 5.4.8 - doorbell array must be
         * "physically contiguous and 64-byte (cache line) aligned".
         */
        xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(*xhci->dcbaa), &dma);
        if (!xhci->dcbaa)
                goto fail;
        memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
        xhci->dcbaa->dma = dma;
        xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
                        (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
        xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

        /*
         * Initialize the ring segment pool.  The ring must be a contiguous
         * structure comprised of TRBs.  The TRBs must be 16 byte aligned;
         * however, the command ring segment needs 64-byte aligned segments,
         * so we pick the greater alignment need.
         */
        xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
                        SEGMENT_SIZE, 64, xhci->page_size);

        /* See Table 46 and Note on Figure 55 */
        xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
                        2112, 64, xhci->page_size);
        if (!xhci->segment_pool || !xhci->device_pool)
                goto fail;

        /* Set up the command ring to have one segment for now. */
        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
        if (!xhci->cmd_ring)
                goto fail;
        xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
        xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
                        (unsigned long long)xhci->cmd_ring->first_seg->dma);

        /* Set the address in the Command Ring Control register */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (unsigned long long)val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        val = xhci_readl(xhci, &xhci->cap_regs->db_off);
        val &= DBOFF_MASK;
        xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                        " from cap regs base addr\n", val);
        xhci->dba = (void *) xhci->cap_regs + val;
        xhci_dbg_regs(xhci);
        xhci_print_run_regs(xhci);
        /* Set ir_set to interrupt register set 0 */
        xhci->ir_set = (void *) xhci->run_regs->ir_set;

        /*
         * Event ring setup: Allocate a normal ring, but also set up
         * the event ring segment table (ERST).  Section 4.9.3.
         */
        xhci_dbg(xhci, "// Allocating event ring\n");
        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
        if (!xhci->event_ring)
                goto fail;

        xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
        if (!xhci->erst.entries)
                goto fail;
        xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
                        (unsigned long long)dma);

        memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
        xhci->erst.num_entries = ERST_NUM_SEGS;
        xhci->erst.erst_dma_addr = dma;
        xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
                        xhci->erst.num_entries,
                        xhci->erst.entries,
                        (unsigned long long)xhci->erst.erst_dma_addr);

        /* set ring base address and size for each segment table entry */
        for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
                struct xhci_erst_entry *entry = &xhci->erst.entries[val];
                entry->seg_addr = seg->dma;
                entry->seg_size = TRBS_PER_SEGMENT;
                entry->rsvd = 0;
                seg = seg->next;
        }

        /* set ERST count with the number of entries in the segment table */
        val = xhci_readl(xhci, &xhci->ir_set->erst_size);
        val &= ERST_SIZE_MASK;
        val |= ERST_NUM_SEGS;
        xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
                        val);
        xhci_writel(xhci, val, &xhci->ir_set->erst_size);

        xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
        /* set the segment table base address */
        xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
                        (unsigned long long)xhci->erst.erst_dma_addr);
        val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        val_64 &= ERST_PTR_MASK;
        val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
        xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

        /* Set the event ring dequeue address */
        xhci_set_hc_event_deq(xhci);
        xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        /*
         * XXX: Might need to set the Interrupter Moderation Register to
         * something other than the default (~1ms minimum between interrupts).
         * See section 5.5.1.2.
         */
        init_completion(&xhci->addr_dev);
        for (i = 0; i < MAX_HC_SLOTS; ++i)
                xhci->devs[i] = NULL;

        if (scratchpad_alloc(xhci, flags))
                goto fail;

        return 0;

fail:
        xhci_warn(xhci, "Couldn't initialize memory\n");
        xhci_mem_cleanup(xhci);
        return -ENOMEM;
}