uboot/drivers/usb/host/xhci-mem.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *          Vikas Sajjan <vikas.sajjan@samsung.com>
 */

#include <common.h>
#include <dm.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE          CONFIG_SYS_CACHELINE_SIZE
/**
 * Flushes the dcache for the memory region passed, rounded out to
 * cache-line boundaries
 *
 * @param addr  start address of the memory region to be flushed
 * @param len   length of the memory region to be flushed
 * @return none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
        BUG_ON((void *)addr == NULL || len == 0);

        flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
                                ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Invalidates the dcache for the memory region passed, rounded out to
 * cache-line boundaries
 *
 * @param addr  start address of the memory region to be invalidated
 * @param len   length of the memory region to be invalidated
 * @return none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
        BUG_ON((void *)addr == NULL || len == 0);

        invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
                                ALIGN(addr + len, CACHELINE_SIZE));
}
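
/*
 * A minimal usage sketch (not part of the driver): the two helpers above
 * bracket DMA hand-offs between the CPU and the xHC. The CPU flushes a
 * descriptor before the controller reads it, and invalidates before reading
 * anything the controller wrote. The TRB used here is only illustrative:
 *
 *      union xhci_trb *trb = ...;              // descriptor shared with xHC
 *
 *      trb->link.control = cpu_to_le32(val);   // CPU fills in descriptor
 *      xhci_flush_cache((uintptr_t)trb, sizeof(*trb));  // visible to HW
 *
 *      // ... controller DMAs a completion back ...
 *
 *      xhci_inval_cache((uintptr_t)trb, sizeof(*trb));  // drop stale lines
 *      val = le32_to_cpu(trb->link.control);   // CPU reads fresh data
 *
 * Both helpers round addr/len out to whole cache lines, so callers may pass
 * unaligned buffers; anything else sharing those lines is flushed or
 * invalidated along with them.
 */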

/**
 * Frees the "segment" pointer passed
 *
 * @param seg   pointer to the "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
        free(seg->trbs);
        seg->trbs = NULL;

        free(seg);
}

/**
 * Frees the "ring" pointer passed
 *
 * @param ring  pointer to the "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
        struct xhci_segment *seg;
        struct xhci_segment *first_seg;

        BUG_ON(!ring);

        first_seg = ring->first_seg;
        seg = first_seg->next;
        while (seg != first_seg) {
                struct xhci_segment *next = seg->next;
                xhci_segment_free(seg);
                seg = next;
        }
        xhci_segment_free(first_seg);

        free(ring);
}

/**
 * Free the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl        host controller data structure
 * @return      none
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
        if (!ctrl->scratchpad)
                return;

        ctrl->dcbaa->dev_context_ptrs[0] = 0;

        free((void *)(uintptr_t)ctrl->scratchpad->sp_array[0]);
        free(ctrl->scratchpad->sp_array);
        free(ctrl->scratchpad);
        ctrl->scratchpad = NULL;
}

/**
 * Frees the "xhci_container_ctx" pointer passed
 *
 * @param ctx   pointer to the "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
        free(ctx->bytes);
        free(ctx);
}

/**
 * Frees the virtual devices of the "xhci_ctrl" pointer passed
 *
 * @param ctrl  pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
        int i;
        int slot_id;
        struct xhci_virt_device *virt_dev;

        /*
         * Loop through all the virt_devs;
         * slot ID 0 is reserved and stays NULL
         */
        for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
                virt_dev = ctrl->devs[slot_id];
                if (!virt_dev)
                        continue;

                ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

                for (i = 0; i < 31; ++i)
                        if (virt_dev->eps[i].ring)
                                xhci_ring_free(virt_dev->eps[i].ring);

                if (virt_dev->in_ctx)
                        xhci_free_container_ctx(virt_dev->in_ctx);
                if (virt_dev->out_ctx)
                        xhci_free_container_ctx(virt_dev->out_ctx);

                free(virt_dev);
                /* make sure we are pointing to NULL */
                ctrl->devs[slot_id] = NULL;
        }
}

/**
 * Frees all the memory allocated by the host controller
 *
 * @param ctrl  pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
        xhci_ring_free(ctrl->event_ring);
        xhci_ring_free(ctrl->cmd_ring);
        xhci_scratchpad_free(ctrl);
        xhci_free_virt_devices(ctrl);
        free(ctrl->erst.entries);
        free(ctrl->dcbaa);
        memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Mallocs cache-line-aligned, zeroed memory
 *
 * @param size  size of memory to be allocated
 * @return pointer to the allocated, aligned and zeroed memory
 */
static void *xhci_malloc(unsigned int size)
{
        void *ptr;
        size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

        ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
        BUG_ON(!ptr);
        memset(ptr, '\0', size);

        xhci_flush_cache((uintptr_t)ptr, size);

        return ptr;
}
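
/*
 * Illustrative note (assumption: 64-byte cache lines): rounding both the
 * alignment and the size up to the cache line keeps every allocation in
 * private cache lines, so xhci_flush_cache()/xhci_inval_cache() on one
 * buffer can never clobber a neighbouring allocation. E.g. with
 * CACHELINE_SIZE = 64:
 *
 *      xhci_malloc(100);       // returns a 64-byte aligned, 128-byte slot
 *
 * The trailing 28 bytes are padding, not usable payload.
 */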

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev  pointer to the previous segment
 * @param next  pointer to the next segment
 * @param link_trbs     flag to indicate whether to link the TRBs or not
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
                                struct xhci_segment *next, bool link_trbs)
{
        u32 val;
        u64 val_64 = 0;

        if (!prev || !next)
                return;
        prev->next = next;
        if (link_trbs) {
                val_64 = (uintptr_t)next->trbs;
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

                /*
                 * Set the last TRB in the segment to
                 * have a TRB type ID of Link TRB
                 */
                val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
                val &= ~TRB_TYPE_BITMASK;
                val |= (TRB_LINK << TRB_TYPE_SHIFT);

                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
        }
}
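
/*
 * Worked example of the read-modify-write above, assuming the usual xHCI
 * encoding (TRB type in bits 15:10 of 'control', TRB_LINK = 6):
 *
 *      control = 0x00000001                    // e.g. only the cycle bit set
 *      control &= ~TRB_TYPE_BITMASK            // clear bits 15:10 -> 0x00000001
 *      control |= TRB_LINK << TRB_TYPE_SHIFT   // 6 << 10       -> 0x00001801
 *
 * The cycle bit is deliberately preserved; only the type field changes.
 */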

/**
 * Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
 *
 * @param ring  pointer to the RING to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
        /*
         * The ring is empty, so the enqueue pointer == dequeue pointer
         */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;

        /*
         * The ring is initialized to 0. The producer must write 1 to the
         * cycle bit to handover ownership of the TRB, so PCS = 1.
         * The consumer must compare CCS to the cycle bit to
         * check ownership, so CCS = 1.
         */
        ring->cycle_state = 1;
}
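
/*
 * Sketch of the consumer side of this scheme (illustrative, not driver
 * code; the event-handling path does something along these lines): a TRB
 * belongs to the consumer only while its cycle bit matches
 * ring->cycle_state, and the state toggles each time the dequeue pointer
 * wraps past a Link TRB with LINK_TOGGLE set:
 *
 *      if ((le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) !=
 *          ring->cycle_state)
 *              return NULL;    // TRB still owned by the producer
 */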

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param       none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
        struct xhci_segment *seg;

        seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
        BUG_ON(!seg);

        seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

        seg->next = NULL;

        return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs      number of segments in the ring
 * @param link_trbs     flag to indicate whether to link the TRBs or not
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
        struct xhci_ring *ring;
        struct xhci_segment *prev;

        ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
        BUG_ON(!ring);

        if (num_segs == 0)
                return ring;

        ring->first_seg = xhci_segment_alloc();
        BUG_ON(!ring->first_seg);

        num_segs--;

        prev = ring->first_seg;
        while (num_segs > 0) {
                struct xhci_segment *next;

                next = xhci_segment_alloc();
                BUG_ON(!next);

                xhci_link_segments(prev, next, link_trbs);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(prev, ring->first_seg, link_trbs);
        if (link_trbs) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
                                        cpu_to_le32(LINK_TOGGLE);
        }
        xhci_initialize_ring_info(ring);

        return ring;
}
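
/*
 * Usage sketch, mirroring how this file itself calls the allocator below:
 * command and transfer rings get Link TRBs so they wrap in hardware, while
 * the event ring does not, since the xHC chains event segments through the
 * ERST rather than through Link TRBs:
 *
 *      ring = xhci_ring_alloc(1, true);                   // cmd/transfer
 *      event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);  // event ring
 *
 * Note that both calls BUG() on allocation failure instead of returning
 * NULL.
 */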

/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl        host controller data structure
 * @return      -ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
        struct xhci_hccr *hccr = ctrl->hccr;
        struct xhci_hcor *hcor = ctrl->hcor;
        struct xhci_scratchpad *scratchpad;
        int num_sp;
        uint32_t page_size;
        void *buf;
        int i;

        num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
        if (!num_sp)
                return 0;

        scratchpad = malloc(sizeof(*scratchpad));
        if (!scratchpad)
                goto fail_sp;
        ctrl->scratchpad = scratchpad;

        scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
        if (!scratchpad->sp_array)
                goto fail_sp2;
        ctrl->dcbaa->dev_context_ptrs[0] =
                cpu_to_le64((uintptr_t)scratchpad->sp_array);

        xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[0],
                sizeof(ctrl->dcbaa->dev_context_ptrs[0]));

        page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
        for (i = 0; i < 16; i++) {
                if ((0x1 & page_size) != 0)
                        break;
                page_size = page_size >> 1;
        }
        BUG_ON(i == 16);

        page_size = 1 << (i + 12);
        buf = memalign(page_size, num_sp * page_size);
        if (!buf)
                goto fail_sp3;
        memset(buf, '\0', num_sp * page_size);
        xhci_flush_cache((uintptr_t)buf, num_sp * page_size);

        for (i = 0; i < num_sp; i++) {
                uintptr_t ptr = (uintptr_t)buf + i * page_size;
                scratchpad->sp_array[i] = cpu_to_le64(ptr);
        }

        return 0;

fail_sp3:
        free(scratchpad->sp_array);

fail_sp2:
        free(scratchpad);
        ctrl->scratchpad = NULL;

fail_sp:
        return -ENOMEM;
}
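
/*
 * Worked example of the PAGESIZE decode above: the low 16 bits of the
 * register have bit i set when the controller supports pages of
 * 2^(i + 12) bytes, and the loop picks the lowest supported size:
 *
 *      or_pagesize & 0xffff == 0x0001 -> i = 0 -> page_size = 1 << 12 = 4 KiB
 *      or_pagesize & 0xffff == 0x0008 -> i = 3 -> page_size = 1 << 15 = 32 KiB
 *
 * The scratchpad then consists of num_sp page-aligned pages of that size,
 * with their addresses recorded in sp_array for the xHC.
 */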

/**
 * Allocates the Container context
 *
 * @param ctrl  Host controller data structure
 * @param type  type of XHCI Container Context
 * @return pointer to the newly allocated context (BUG()s on failure)
 */
static struct xhci_container_ctx
                *xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
        struct xhci_container_ctx *ctx;

        ctx = (struct xhci_container_ctx *)
                malloc(sizeof(struct xhci_container_ctx));
        BUG_ON(!ctx);

        BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
        ctx->type = type;
        ctx->size = (MAX_EP_CTX_NUM + 1) *
                        CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
        if (type == XHCI_CTX_TYPE_INPUT)
                ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

        ctx->bytes = (u8 *)xhci_malloc(ctx->size);

        return ctx;
}
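
/*
 * Resulting layout (illustrative; each entry is CTX_SIZE bytes, i.e. 32 or
 * 64 depending on the context-size bit in HCCPARAMS):
 *
 *      Device (output) context:        Input context:
 *        [0]  Slot Context               [0]  Input Control Context
 *        [1]  EP Context, DCI 1          [1]  Slot Context
 *        ...                             [2]  EP Context, DCI 1
 *        [31] EP Context, DCI 31         ...
 *
 * The extra leading Input Control Context is why the input variant adds one
 * more CTX_SIZE to ctx->size, and why xhci_get_slot_ctx()/xhci_get_ep_ctx()
 * below skip one extra entry for XHCI_CTX_TYPE_INPUT.
 */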

/**
 * Allocates the virtual device
 *
 * @param ctrl          Host controller data structure
 * @param slot_id       slot ID of the device
 * @return 0 on success else negative error code on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
        u64 byte_64 = 0;
        struct xhci_virt_device *virt_dev;

        /* Slot ID 0 is reserved */
        if (ctrl->devs[slot_id]) {
                printf("Virt dev for slot[%d] already allocated\n", slot_id);
                return -EEXIST;
        }

        ctrl->devs[slot_id] = (struct xhci_virt_device *)
                                        malloc(sizeof(struct xhci_virt_device));

        if (!ctrl->devs[slot_id]) {
                puts("Failed to allocate virtual device\n");
                return -ENOMEM;
        }

        memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
        virt_dev = ctrl->devs[slot_id];

        /* Allocate the (output) device context that will be used in the HC. */
        virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
                                        XHCI_CTX_TYPE_DEVICE);
        if (!virt_dev->out_ctx) {
                puts("Failed to allocate out context for virt dev\n");
                return -ENOMEM;
        }

        /* Allocate the (input) device context for address device command */
        virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
                                        XHCI_CTX_TYPE_INPUT);
        if (!virt_dev->in_ctx) {
                puts("Failed to allocate in context for virt dev\n");
                return -ENOMEM;
        }

        /* Allocate endpoint 0 ring */
        virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

        byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

        /* Point to output device context in dcbaa. */
        ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

        xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
                         sizeof(__le64));
        return 0;
}

/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl  Host controller data structure
 * @param hccr  pointer to HOST Controller Control Registers
 * @param hcor  pointer to HOST Controller Operational Registers
 * @return 0 if successful else negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
                                        struct xhci_hcor *hcor)
{
        uint64_t val_64;
        uint64_t trb_64;
        uint32_t val;
        unsigned long deq;
        int i;
        struct xhci_segment *seg;

        /* DCBAA initialization */
        ctrl->dcbaa = (struct xhci_device_context_array *)
                        xhci_malloc(sizeof(struct xhci_device_context_array));
        if (ctrl->dcbaa == NULL) {
                puts("unable to allocate DCBA\n");
                return -ENOMEM;
        }

        val_64 = (uintptr_t)ctrl->dcbaa;
        /* Set the pointer in DCBAA register */
        xhci_writeq(&hcor->or_dcbaap, val_64);

        /* Command ring control pointer register initialization */
        ctrl->cmd_ring = xhci_ring_alloc(1, true);

        /* Set the address in the Command Ring Control register */
        trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
        val_64 = xhci_readq(&hcor->or_crcr);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
                ctrl->cmd_ring->cycle_state;
        xhci_writeq(&hcor->or_crcr, val_64);

        /* write the address of db register */
        val = xhci_readl(&hccr->cr_dboff);
        val &= DBOFF_MASK;
        ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

        /* write the address of runtime register */
        val = xhci_readl(&hccr->cr_rtsoff);
        val &= RTSOFF_MASK;
        ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

        /* writing the address of ir_set structure */
        ctrl->ir_set = &ctrl->run_regs->ir_set[0];

        /* Event ring does not maintain link TRB */
        ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
        ctrl->erst.entries = (struct xhci_erst_entry *)
                xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

        ctrl->erst.num_entries = ERST_NUM_SEGS;

        for (val = 0, seg = ctrl->event_ring->first_seg;
                        val < ERST_NUM_SEGS;
                        val++) {
                struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

                trb_64 = (uintptr_t)seg->trbs;
                xhci_writeq(&entry->seg_addr, trb_64);
                entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
                entry->rsvd = 0;
                seg = seg->next;
        }
        xhci_flush_cache((uintptr_t)ctrl->erst.entries,
                         ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

        deq = (unsigned long)ctrl->event_ring->dequeue;

        /* Update HC event ring dequeue pointer */
        xhci_writeq(&ctrl->ir_set->erst_dequeue,
                                (u64)deq & (u64)~ERST_PTR_MASK);

        /* set ERST count with the number of entries in the segment table */
        val = xhci_readl(&ctrl->ir_set->erst_size);
        val &= ERST_SIZE_MASK;
        val |= ERST_NUM_SEGS;
        xhci_writel(&ctrl->ir_set->erst_size, val);

        /* this is the event ring segment table pointer */
        val_64 = xhci_readq(&ctrl->ir_set->erst_base);
        val_64 &= ERST_PTR_MASK;
        val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

        xhci_writeq(&ctrl->ir_set->erst_base, val_64);

        /* set up the scratchpad buffer array and scratchpad buffers */
        xhci_scratchpad_alloc(ctrl);

        /* initializing the virtual devices to NULL */
        for (i = 0; i < MAX_HC_SLOTS; ++i)
                ctrl->devs[i] = NULL;

        /*
         * Zero this register completely, or spurious Device Notification
         * Events might cause trouble here.
         */
        xhci_writel(&hcor->or_dnctrl, 0x0);

        return 0;
}
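
/*
 * Usage sketch (assumption: hccr/hcor come from the platform glue that
 * mapped the controller's register space, and this runs once during
 * controller bring-up; error handling elided):
 *
 *      ret = xhci_mem_init(ctrl, hccr, hcor);  // DCBAA, cmd ring, ERST,
 *      if (ret)                                // event ring, scratchpads
 *              return ret;
 *      // the controller can now be started; commands go on ctrl->cmd_ring
 *      // and completions arrive on ctrl->event_ring
 */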

/**
 * Gives the input control context for the passed container context
 *
 * @param ctx   pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
                *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
        BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
        return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Gives the slot context for the passed container context
 *
 * @param ctrl  Host controller data structure
 * @param ctx   pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
                                struct xhci_container_ctx *ctx)
{
        if (ctx->type == XHCI_CTX_TYPE_DEVICE)
                return (struct xhci_slot_ctx *)ctx->bytes;

        return (struct xhci_slot_ctx *)
                (ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl  Host controller data structure
 * @param ctx   context container
 * @param ep_index      index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
                                    struct xhci_container_ctx *ctx,
                                    unsigned int ep_index)
{
        /* increment ep index by offset of start of ep ctx array */
        ep_index++;
        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                ep_index++;

        return (struct xhci_ep_ctx *)
                (ctx->bytes +
                (ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}
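
/*
 * Worked example of the index math above (assuming 32-byte contexts): for
 * ep_index 0 (endpoint 0) in a device (output) context the EP context sits
 * one entry after the slot context; in an input context the Input Control
 * Context shifts everything by one more entry:
 *
 *      xhci_get_ep_ctx(ctrl, out_ctx, 0) -> ctx->bytes + 1 * 32
 *      xhci_get_ep_ctx(ctrl, in_ctx, 0)  -> ctx->bytes + 2 * 32
 */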

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl  Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @param ep_index index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
                        struct xhci_container_ctx *in_ctx,
                        struct xhci_container_ctx *out_ctx,
                        unsigned int ep_index)
{
        struct xhci_ep_ctx *out_ep_ctx;
        struct xhci_ep_ctx *in_ep_ctx;

        out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
        in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

        in_ep_ctx->ep_info = out_ep_ctx->ep_info;
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl  Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
                                        struct xhci_container_ctx *out_ctx)
{
        struct xhci_slot_ctx *in_slot_ctx;
        struct xhci_slot_ctx *out_slot_ctx;

        in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
        out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

        in_slot_ctx->dev_info = out_slot_ctx->dev_info;
        in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
        in_slot_ctx->tt_info = out_slot_ctx->tt_info;
        in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl          Host controller data structure
 * @param udev          pointer to the Device Data Structure
 * @param hop_portnr    root hub port number the device is attached to
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
                                     struct usb_device *udev, int hop_portnr)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_ep_ctx *ep0_ctx;
        struct xhci_slot_ctx *slot_ctx;
        u32 port_num = 0;
        u64 trb_64 = 0;
        int slot_id = udev->slot_id;
        int speed = udev->speed;
        int route = 0;
#if CONFIG_IS_ENABLED(DM_USB)
        struct usb_device *dev = udev;
        struct usb_hub_device *hub;
#endif

        virt_dev = ctrl->devs[slot_id];

        BUG_ON(!virt_dev);

        /* Extract the EP0 and Slot Ctrl */
        ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
        slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

        /* Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

#if CONFIG_IS_ENABLED(DM_USB)
        /* Calculate the route string for this device */
        port_num = dev->portnr;
        while (!usb_hub_is_root_hub(dev->dev)) {
                hub = dev_get_uclass_priv(dev->dev);
                /*
                 * Each hub in the topology is expected to have no more than
                 * 15 ports in order for the route string of a device to be
                 * unique. SuperSpeed hubs are restricted to only having 15
                 * ports, but FS/LS/HS hubs are not. The xHCI specification
                 * says that if the port number of the device is greater than
                 * 15, that portion of the route string shall be set to 15.
                 */
                if (port_num > 15)
                        port_num = 15;
                route |= port_num << (hub->hub_depth * 4);
                dev = dev_get_parent_priv(dev->dev);
                port_num = dev->portnr;
                dev = dev_get_parent_priv(dev->dev->parent);
        }

        debug("route string %x\n", route);
#endif
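
        /*
         * Worked route-string example, per the xHCI spec's encoding (nibble
         * k of the route string holds the downstream port number at hub
         * tier k + 1; the root hub port itself goes into dev_info2, not the
         * route string). Hypothetical topology: a device on port 3 of a
         * tier-1 hub yields route = 0x3; move the device behind a tier-2
         * hub attached to that port, on the tier-2 hub's port 5, and the
         * route becomes (5 << 4) | 3 = 0x53.
         */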
        slot_ctx->dev_info |= route;

        switch (speed) {
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
                break;
        case USB_SPEED_HIGH:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
                break;
        case USB_SPEED_FULL:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
                break;
        case USB_SPEED_LOW:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
                break;
        default:
                /* Speed was set earlier, this shouldn't happen. */
                BUG();
        }

#if CONFIG_IS_ENABLED(DM_USB)
        /* Set up TT fields to support FS/LS devices */
        if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
                struct udevice *parent = udev->dev;

                dev = udev;
                do {
                        port_num = dev->portnr;
                        dev = dev_get_parent_priv(parent);
                        if (usb_hub_is_root_hub(dev->dev))
                                break;
                        parent = dev->dev->parent;
                } while (dev->speed != USB_SPEED_HIGH);

                if (!usb_hub_is_root_hub(dev->dev)) {
                        hub = dev_get_uclass_priv(dev->dev);
                        if (hub->tt.multi)
                                slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
                        slot_ctx->tt_info |= cpu_to_le32(TT_PORT(port_num));
                        slot_ctx->tt_info |= cpu_to_le32(TT_SLOT(dev->slot_id));
                }
        }
#endif

        port_num = hop_portnr;
        debug("port_num = %d\n", port_num);

        slot_ctx->dev_info2 |=
                        cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
                                ROOT_HUB_PORT_SHIFT));

        /* Step 4 - ring already allocated */
        /* Step 5 */
        ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
        debug("SPEED = %d\n", speed);

        switch (speed) {
        case USB_SPEED_SUPER:
                ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
                                        MAX_PACKET_SHIFT));
                debug("Setting Packet size = 512bytes\n");
                break;
        case USB_SPEED_HIGH:
        /* USB core guesses at a 64-byte max packet first for FS devices */
        case USB_SPEED_FULL:
                ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
                                        MAX_PACKET_SHIFT));
                debug("Setting Packet size = 64bytes\n");
                break;
        case USB_SPEED_LOW:
                ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
                                        MAX_PACKET_SHIFT));
                debug("Setting Packet size = 8bytes\n");
                break;
        default:
                /* New speed? */
                BUG();
        }

        /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
        ep0_ctx->ep_info2 |=
                        cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
                        ((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

        trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
        ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

        /*
         * xHCI spec 6.2.3:
         * software shall set 'Average TRB Length' to 8 for control endpoints.
         */
        ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));

        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */

        xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
        xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}