uboot/drivers/usb/host/xhci-mem.c
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *          Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm-generic/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE          CONFIG_SYS_CACHELINE_SIZE
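
/*
 * Note: the cache helpers below round the start address down and the end
 * address up to CACHELINE_SIZE, since d-cache flush/invalidate operations
 * work on whole cache lines rather than arbitrary byte ranges.
 */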
/**
 * Flushes the memory region passed, from addr to addr + len
 *
 * @param addr  start address of the memory region to be flushed
 * @param len   length of the memory region to be flushed
 * @return none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
        BUG_ON((void *)addr == NULL || len == 0);

        flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
                                ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Invalidates the memory region passed, from addr to addr + len
 *
 * @param addr  start address of the memory region to be invalidated
 * @param len   length of the memory region to be invalidated
 * @return none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
        BUG_ON((void *)addr == NULL || len == 0);

        invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
                                ALIGN(addr + len, CACHELINE_SIZE));
}


/**
 * Frees the "segment" pointer passed
 *
 * @param seg   pointer to the "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
        free(seg->trbs);
        seg->trbs = NULL;

        free(seg);
}

/**
 * Frees the "ring" pointer passed
 *
 * @param ring  pointer to the "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
        struct xhci_segment *seg;
        struct xhci_segment *first_seg;

        BUG_ON(!ring);

        first_seg = ring->first_seg;
        seg = first_seg->next;
        while (seg != first_seg) {
                struct xhci_segment *next = seg->next;
                xhci_segment_free(seg);
                seg = next;
        }
        xhci_segment_free(first_seg);

        free(ring);
}

/**
 * Frees the "xhci_container_ctx" pointer passed
 *
 * @param ctx   pointer to the "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
        free(ctx->bytes);
        free(ctx);
}

/**
 * Frees the virtual devices for the "xhci_ctrl" pointer passed
 *
 * @param ctrl  pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
        int i;
        int slot_id;
        struct xhci_virt_device *virt_dev;

        /*
         * refactored here to loop through all virt_dev
         * Slot ID 0 is reserved
         */
        for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
                virt_dev = ctrl->devs[slot_id];
                if (!virt_dev)
                        continue;

                ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

                for (i = 0; i < 31; ++i)
                        if (virt_dev->eps[i].ring)
                                xhci_ring_free(virt_dev->eps[i].ring);

                if (virt_dev->in_ctx)
                        xhci_free_container_ctx(virt_dev->in_ctx);
                if (virt_dev->out_ctx)
                        xhci_free_container_ctx(virt_dev->out_ctx);

                free(virt_dev);
                /* make sure we are pointing to NULL */
                ctrl->devs[slot_id] = NULL;
        }
}

/**
 * Frees all the memory allocated
 *
 * @param ctrl  pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
        xhci_ring_free(ctrl->event_ring);
        xhci_ring_free(ctrl->cmd_ring);
        xhci_free_virt_devices(ctrl);
        free(ctrl->erst.entries);
        free(ctrl->dcbaa);
        memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Mallocs cache-aligned memory and zeroes it
 *
 * @param size  size of memory to be allocated
 * @return pointer to the allocated, aligned and zeroed memory
 */
static void *xhci_malloc(unsigned int size)
{
        void *ptr;
        size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

        ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
        BUG_ON(!ptr);
        memset(ptr, '\0', size);

        xhci_flush_cache((uintptr_t)ptr, size);

        return ptr;
}
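
/*
 * Everything handed out by xhci_malloc() (DCBAA, ring segments, device
 * contexts, ERST entries) is read by the controller via DMA, not through
 * the CPU cache. Hence the flush above, and hence callers flush after CPU
 * writes and invalidate before CPU reads of these structures.
 */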

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev  pointer to the previous segment
 * @param next  pointer to the next segment
 * @param link_trbs     flag to indicate whether to link the trbs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
                                struct xhci_segment *next, bool link_trbs)
{
        u32 val;
        u64 val_64 = 0;

        if (!prev || !next)
                return;
        prev->next = next;
        if (link_trbs) {
                val_64 = (uintptr_t)next->trbs;
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

                /*
                 * Set the last TRB in the segment to
                 * have a TRB type ID of Link TRB
                 */
                val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
                val &= ~TRB_TYPE_BITMASK;
                val |= (TRB_LINK << TRB_TYPE_SHIFT);

                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
        }
}

/**
 * Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
 *
 * @param ring  pointer to the RING to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
        /*
         * The ring is empty, so the enqueue pointer == dequeue pointer
         */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;

        /*
         * The ring is initialized to 0. The producer must write 1 to the
         * cycle bit to handover ownership of the TRB, so PCS = 1.
         * The consumer must compare CCS to the cycle bit to
         * check ownership, so CCS = 1.
         */
        ring->cycle_state = 1;
}

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param       none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
        struct xhci_segment *seg;

        seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
        BUG_ON(!seg);

        seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

        seg->next = NULL;

        return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs      number of segments in the ring
 * @param link_trbs     flag to indicate whether to link the trbs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
        struct xhci_ring *ring;
        struct xhci_segment *prev;

        ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
        BUG_ON(!ring);

        if (num_segs == 0)
                return ring;

        ring->first_seg = xhci_segment_alloc();
        BUG_ON(!ring->first_seg);

        num_segs--;

        prev = ring->first_seg;
        while (num_segs > 0) {
                struct xhci_segment *next;

                next = xhci_segment_alloc();
                BUG_ON(!next);

                xhci_link_segments(prev, next, link_trbs);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(prev, ring->first_seg, link_trbs);
        if (link_trbs) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
                                        cpu_to_le32(LINK_TOGGLE);
        }
        xhci_initialize_ring_info(ring);

        return ring;
}
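
/*
 * Typical usage in this driver: transfer and command rings are allocated as
 * single linked segments, e.g. xhci_ring_alloc(1, true), so the final Link
 * TRB points back at the first TRB with the toggle bit set. The event ring
 * is allocated with link_trbs = false, since its segments are described by
 * the Event Ring Segment Table rather than by Link TRBs.
 */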

/**
 * Allocates the Container context
 *
 * @param ctrl  Host controller data structure
 * @param type  type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
                *xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
        struct xhci_container_ctx *ctx;

        ctx = (struct xhci_container_ctx *)
                malloc(sizeof(struct xhci_container_ctx));
        BUG_ON(!ctx);

        BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
        ctx->type = type;
        ctx->size = (MAX_EP_CTX_NUM + 1) *
                        CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
        if (type == XHCI_CTX_TYPE_INPUT)
                ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

        ctx->bytes = (u8 *)xhci_malloc(ctx->size);

        return ctx;
}
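
/*
 * Note: CTX_SIZE() is derived from the CSZ bit in HCCPARAMS, so each context
 * entry is either 32 or 64 bytes. A device context holds one slot context
 * plus MAX_EP_CTX_NUM endpoint contexts; an input context carries one extra
 * entry up front for the input control context, hence the additional
 * CTX_SIZE() added above for XHCI_CTX_TYPE_INPUT.
 */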

/**
 * Allocates the virtual device for a slot
 *
 * @param ctrl  Host controller data structure
 * @param slot_id       slot ID assigned by the controller
 * @return 0 on success else negative error code on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
        u64 byte_64 = 0;
        struct xhci_virt_device *virt_dev;

        /* Slot ID 0 is reserved */
        if (ctrl->devs[slot_id]) {
                printf("Virt dev for slot[%d] already allocated\n", slot_id);
                return -EEXIST;
        }

        ctrl->devs[slot_id] = (struct xhci_virt_device *)
                                        malloc(sizeof(struct xhci_virt_device));

        if (!ctrl->devs[slot_id]) {
                puts("Failed to allocate virtual device\n");
                return -ENOMEM;
        }

        memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
        virt_dev = ctrl->devs[slot_id];

        /* Allocate the (output) device context that will be used in the HC. */
        virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
                                        XHCI_CTX_TYPE_DEVICE);
        if (!virt_dev->out_ctx) {
                puts("Failed to allocate out context for virt dev\n");
                return -ENOMEM;
        }

        /* Allocate the (input) device context for address device command */
        virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
                                        XHCI_CTX_TYPE_INPUT);
        if (!virt_dev->in_ctx) {
                puts("Failed to allocate in context for virt dev\n");
                return -ENOMEM;
        }

        /* Allocate endpoint 0 ring */
        virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

        byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

        /* Point to output device context in dcbaa. */
        ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

        xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
                         sizeof(__le64));
        return 0;
}
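
/*
 * Note: per xHCI spec section 6.1, each valid DCBAA entry points at the
 * slot's (output) device context, which is what the write above sets up;
 * the controller reads the entry via DMA, which is why it is flushed.
 */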

/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl  Host controller data structure
 * @param hccr  pointer to HOST Controller Control Registers
 * @param hcor  pointer to HOST Controller Operational Registers
 * @return 0 if successful else negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
                                        struct xhci_hcor *hcor)
{
        uint64_t val_64;
        uint64_t trb_64;
        uint32_t val;
        unsigned long deq;
        int i;
        struct xhci_segment *seg;

        /* DCBAA initialization */
        ctrl->dcbaa = (struct xhci_device_context_array *)
                        xhci_malloc(sizeof(struct xhci_device_context_array));
        if (ctrl->dcbaa == NULL) {
                puts("unable to allocate DCBA\n");
                return -ENOMEM;
        }

        val_64 = (uintptr_t)ctrl->dcbaa;
        /* Set the pointer in DCBAA register */
        xhci_writeq(&hcor->or_dcbaap, val_64);

        /* Command ring control pointer register initialization */
        ctrl->cmd_ring = xhci_ring_alloc(1, true);

        /* Set the address in the Command Ring Control register */
        trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
        val_64 = xhci_readq(&hcor->or_crcr);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
                ctrl->cmd_ring->cycle_state;
        xhci_writeq(&hcor->or_crcr, val_64);

        /* write the address of db register */
        val = xhci_readl(&hccr->cr_dboff);
        val &= DBOFF_MASK;
        ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

        /* write the address of runtime register */
        val = xhci_readl(&hccr->cr_rtsoff);
        val &= RTSOFF_MASK;
        ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

        /* writing the address of ir_set structure */
        ctrl->ir_set = &ctrl->run_regs->ir_set[0];

        /* Event ring does not maintain link TRB */
        ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
        ctrl->erst.entries = (struct xhci_erst_entry *)
                xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

        ctrl->erst.num_entries = ERST_NUM_SEGS;

        for (val = 0, seg = ctrl->event_ring->first_seg;
                        val < ERST_NUM_SEGS;
                        val++) {
                struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

                trb_64 = (uintptr_t)seg->trbs;
                xhci_writeq(&entry->seg_addr, trb_64);
                entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
                entry->rsvd = 0;
                seg = seg->next;
        }
        xhci_flush_cache((uintptr_t)ctrl->erst.entries,
                         ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

        deq = (unsigned long)ctrl->event_ring->dequeue;

        /* Update HC event ring dequeue pointer */
        xhci_writeq(&ctrl->ir_set->erst_dequeue,
                                (u64)deq & (u64)~ERST_PTR_MASK);

        /* set ERST count with the number of entries in the segment table */
        val = xhci_readl(&ctrl->ir_set->erst_size);
        val &= ERST_SIZE_MASK;
        val |= ERST_NUM_SEGS;
        xhci_writel(&ctrl->ir_set->erst_size, val);

        /* this is the event ring segment table pointer */
        val_64 = xhci_readq(&ctrl->ir_set->erst_base);
        val_64 &= ERST_PTR_MASK;
        val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

        xhci_writeq(&ctrl->ir_set->erst_base, val_64);

        /* initializing the virtual devices to NULL */
        for (i = 0; i < MAX_HC_SLOTS; ++i)
                ctrl->devs[i] = NULL;

        /*
         * Zero this register completely, otherwise spurious
         * Device Notification Events might cause trouble here.
         */
        xhci_writel(&hcor->or_dnctrl, 0x0);

        return 0;
}
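
/*
 * Note: interrupter 0's IMAN/IMOD registers are not programmed above;
 * U-Boot's xHCI stack polls the event ring for completions rather than
 * relying on interrupts.
 */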

/**
 * Gives the input control context for the passed container context
 *
 * @param ctx   pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
                *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
        BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
        return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Gives the slot context for the passed container context
 *
 * @param ctrl  Host controller data structure
 * @param ctx   pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
                                struct xhci_container_ctx *ctx)
{
        if (ctx->type == XHCI_CTX_TYPE_DEVICE)
                return (struct xhci_slot_ctx *)ctx->bytes;

        return (struct xhci_slot_ctx *)
                (ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl  Host controller data structure
 * @param ctx   context container
 * @param ep_index      index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
                                    struct xhci_container_ctx *ctx,
                                    unsigned int ep_index)
{
        /* increment ep index by offset of start of ep ctx array */
        ep_index++;
        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                ep_index++;

        return (struct xhci_ep_ctx *)
                (ctx->bytes +
                (ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}
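
/*
 * Context layout behind the index math above: a device (output) context is
 * [slot ctx][EP0 ctx][EP1 OUT ctx]... so endpoint contexts start at entry 1;
 * an input context additionally begins with the input control context, so
 * its endpoint contexts start at entry 2.
 */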

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl  Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @param ep_index index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
                        struct xhci_container_ctx *in_ctx,
                        struct xhci_container_ctx *out_ctx,
                        unsigned int ep_index)
{
        struct xhci_ep_ctx *out_ep_ctx;
        struct xhci_ep_ctx *in_ep_ctx;

        out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
        in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

        in_ep_ctx->ep_info = out_ep_ctx->ep_info;
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl  Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
                                        struct xhci_container_ctx *out_ctx)
{
        struct xhci_slot_ctx *in_slot_ctx;
        struct xhci_slot_ctx *out_slot_ctx;

        in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
        out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

        in_slot_ctx->dev_info = out_slot_ctx->dev_info;
        in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
        in_slot_ctx->tt_info = out_slot_ctx->tt_info;
        in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl  Host controller data structure
 * @param slot_id       slot ID of the device
 * @param speed speed of the device
 * @param hop_portnr    root hub port number the device is attached to
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl, int slot_id,
                                     int speed, int hop_portnr)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_ep_ctx *ep0_ctx;
        struct xhci_slot_ctx *slot_ctx;
        u32 port_num = 0;
        u64 trb_64 = 0;

        virt_dev = ctrl->devs[slot_id];

        BUG_ON(!virt_dev);

        /* Extract the EP0 and Slot Ctrl */
        ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
        slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

        /* Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | 0);

        switch (speed) {
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
                break;
        case USB_SPEED_HIGH:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
                break;
        case USB_SPEED_FULL:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
                break;
        case USB_SPEED_LOW:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
                break;
        default:
                /* Speed was set earlier, this shouldn't happen. */
                BUG();
        }

        port_num = hop_portnr;
        debug("port_num = %d\n", port_num);

        slot_ctx->dev_info2 |=
                        cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
                                ROOT_HUB_PORT_SHIFT));

        /* Step 4 - ring already allocated */
        /* Step 5 */
        ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
        debug("SPEED = %d\n", speed);

        switch (speed) {
        case USB_SPEED_SUPER:
                ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
                                        MAX_PACKET_SHIFT));
                debug("Setting Packet size = 512bytes\n");
                break;
        case USB_SPEED_HIGH:
        /* USB core guesses at a 64-byte max packet first for FS devices */
        case USB_SPEED_FULL:
                ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
                                        MAX_PACKET_SHIFT));
                debug("Setting Packet size = 64bytes\n");
                break;
        case USB_SPEED_LOW:
                ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
                                        MAX_PACKET_SHIFT));
                debug("Setting Packet size = 8bytes\n");
                break;
        default:
                /* New speed? */
                BUG();
        }

        /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
        ep0_ctx->ep_info2 |=
                        cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
                        ((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

        trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
        ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */

        xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
        xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}