linux/drivers/usb/cdns3/cdnsp-mem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <pawell@cadence.com>
 *
 * Code based on Linux XHCI driver.
 * Origin: Copyright (C) 2008 Intel Corp.
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"

static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep);

/*
 * Allocates a generic ring segment from the ring pool, sets the DMA address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
						 unsigned int cycle_state,
						 unsigned int max_packet,
						 gfp_t flags)
{
	struct cdnsp_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
		if (!seg->bounce_buf)
			goto free_dma;
	}

	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;

free_dma:
	dma_pool_free(pdev->segment_pool, seg->trbs, dma);
	kfree(seg);

	return NULL;
}

static void cdnsp_segment_free(struct cdnsp_device *pdev,
			       struct cdnsp_segment *seg)
{
	if (seg->trbs)
		dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);

	kfree(seg->bounce_buf);
	kfree(seg);
}

static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
					 struct cdnsp_segment *first)
{
	struct cdnsp_segment *seg;

	seg = first->next;

	while (seg != first) {
		struct cdnsp_segment *next = seg->next;

		cdnsp_segment_free(pdev, seg);
		seg = next;
	}

	cdnsp_segment_free(pdev, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void cdnsp_link_segments(struct cdnsp_device *pdev,
				struct cdnsp_segment *prev,
				struct cdnsp_segment *next,
				enum cdnsp_ring_type type)
{
	struct cdnsp_link_trb *link;
	u32 val;

	if (!prev || !next)
		return;

	prev->next = next;
	if (type != TYPE_EVENT) {
		link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
		link->segment_ptr = cpu_to_le64(next->dma);

		/*
		 * Set the last TRB in the segment to have a TRB type ID
		 * of Link TRB.
		 */
		val = le32_to_cpu(link->control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		link->control = cpu_to_le32(val);
	}
}

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void cdnsp_link_rings(struct cdnsp_device *pdev,
			     struct cdnsp_ring *ring,
			     struct cdnsp_segment *first,
			     struct cdnsp_segment *last,
			     unsigned int num_segs)
{
	struct cdnsp_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
	cdnsp_link_segments(pdev, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/*
 * We need a radix tree to map the physical addresses of TRBs to the stream
 * rings they belong to. We need to do this because the device controller
 * won't tell us which stream ring the TRB came from. We could store the
 * stream ID in an event data TRB, but that doesn't help us for the
 * cancellation case, since the endpoint may stop before it reaches that
 * event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses. For example,
 * say I have segments of size 1KB, that are always 1KB aligned. A segment may
 * start at 0x10c91000 and end at 0x10c913f0. If I shift the DMA address right
 * by 10 bits (dropping the offset within the segment), the key to the stream
 * ID is 0x43244. I can use the DMA address of the TRB to pass the radix tree
 * a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key. On 32-bit systems, an
 * unsigned long will be 32 bits; on a 64-bit system an unsigned long will be
 * 64 bits. Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for
 * 64-bit PCI DMA addresses on a 64-bit system). There might be a problem on
 * 32-bit extended systems (where the DMA address can be bigger than 32 bits),
 * if we allow the PCI DMA mask to be bigger than 32 bits. So don't do that.
 */
static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
					struct cdnsp_ring *ring,
					struct cdnsp_segment *seg,
					gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);

	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;

	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();

	return ret;
}
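
/*
 * Illustration only, not part of the driver: because the map key and every
 * TRB inside a segment share the same upper address bits, resolving a TRB
 * to its ring is a single shift plus a radix tree lookup. A hypothetical
 * helper (the name is made up for this sketch; cdnsp_dma_to_transfer_ring()
 * below does the same thing for real) could look like:
 */
static inline struct cdnsp_ring *
cdnsp_example_ring_for_trb(struct radix_tree_root *trb_address_map,
			   dma_addr_t trb_dma)
{
	/* With the 1KB example above: 0x10c912c0 >> 10 == 0x43244. */
	return radix_tree_lookup(trb_address_map,
				 (unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT));
}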

static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					 struct cdnsp_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
					       struct cdnsp_ring *ring,
					       struct cdnsp_segment *first_seg,
					       struct cdnsp_segment *last_seg,
					       gfp_t mem_flags)
{
	struct cdnsp_segment *failed_seg;
	struct cdnsp_segment *seg;
	int ret;

	seg = first_seg;
	do {
		ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
						   mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		cdnsp_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
{
	struct cdnsp_segment *seg;

	seg = ring->first_seg;
	do {
		cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
{
	return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, GFP_ATOMIC);
}

static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{
	if (!ring)
		return;

	trace_cdnsp_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			cdnsp_remove_stream_mapping(ring);

		cdnsp_free_segments_for_ring(pdev, ring->first_seg);
	}

	kfree(ring);
}

void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
{
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to hand over ownership of the TRB, so PCS = 1. The
	 * consumer must compare CCS to the cycle bit to check ownership,
	 * so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to that of the
	 * old ring.
	 */
	ring->cycle_state = 1;

	/*
	 * Each segment has a Link TRB, and we reserve one extra TRB for SW
	 * accounting purposes.
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
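
/*
 * Worked example (illustrative): a freshly initialized two-segment ring has
 * 2 * (TRBS_PER_SEGMENT - 1) usable TRBs, since the last TRB of each segment
 * is a Link TRB, minus the one TRB reserved for SW accounting. If
 * TRBS_PER_SEGMENT were 64, that would be 2 * 63 - 1 == 125 free TRBs.
 */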

/* Allocate segments and link them for a ring. */
static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
					 struct cdnsp_segment **first,
					 struct cdnsp_segment **last,
					 unsigned int num_segs,
					 unsigned int cycle_state,
					 enum cdnsp_ring_type type,
					 unsigned int max_packet,
					 gfp_t flags)
{
	struct cdnsp_segment *prev;

	/* Allocate first segment. */
	prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;

	num_segs--;
	*first = prev;

	/* Allocate all other segments. */
	while (num_segs > 0) {
		struct cdnsp_segment *next;

		next = cdnsp_segment_alloc(pdev, cycle_state,
					   max_packet, flags);
		if (!next) {
			cdnsp_free_segments_for_ring(pdev, *first);
			return -ENOMEM;
		}

		cdnsp_link_segments(pdev, prev, next, type);

		prev = next;
		num_segs--;
	}

	cdnsp_link_segments(pdev, prev, *first, type);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 */
static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
					   unsigned int num_segs,
					   enum cdnsp_ring_type type,
					   unsigned int max_packet,
					   gfp_t flags)
{
	struct cdnsp_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;

	if (num_segs == 0)
		return ring;

	ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
					    &ring->last_seg, num_segs,
					    1, type, max_packet, flags);
	if (ret)
		goto fail;

	/* Only the event ring does not use Link TRBs. */
	if (type != TYPE_EVENT)
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);

	cdnsp_initialize_ring_info(ring);
	trace_cdnsp_ring_alloc(ring);
	return ring;
fail:
	kfree(ring);
	return NULL;
}

void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	cdnsp_ring_free(pdev, pep->ring);
	pep->ring = NULL;
	cdnsp_free_stream_info(pdev, pep);
}

/*
 * Expand an existing ring.
 * Allocate new segments (at least as many as the ring already has, or as
 * many as num_trbs requires, whichever is greater) and link them into the
 * existing ring.
 */
int cdnsp_ring_expansion(struct cdnsp_device *pdev,
			 struct cdnsp_ring *ring,
			 unsigned int num_trbs,
			 gfp_t flags)
{
	unsigned int num_segs_needed;
	struct cdnsp_segment *first;
	struct cdnsp_segment *last;
	unsigned int num_segs;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
			(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size. */
	num_segs = max(ring->num_segs, num_segs_needed);

	ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
					    ring->cycle_state, ring->type,
					    ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
							  ring, first,
							  last, flags);

	if (ret) {
		cdnsp_free_segments_for_ring(pdev, first);

		return ret;
	}

	cdnsp_link_rings(pdev, ring, first, last, num_segs);
	trace_cdnsp_ring_expansion(ring);

	return 0;
}
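
/*
 * Worked example (illustrative): each segment contributes
 * TRBS_PER_SEGMENT - 1 usable TRBs, so num_segs_needed above is the ceiling
 * of num_trbs / (TRBS_PER_SEGMENT - 1). If TRBS_PER_SEGMENT were 64, a
 * request for 100 TRBs would need (100 + 63 - 1) / 63 == 2 new segments.
 */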

static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
{
	int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;

	pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
	pdev->out_ctx.size = size;
	pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
	pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
					      &pdev->out_ctx.dma);

	if (!pdev->out_ctx.bytes)
		return -ENOMEM;

	pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
	pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
	pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
	pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
					     &pdev->in_ctx.dma);

	if (!pdev->in_ctx.bytes) {
		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
			      pdev->out_ctx.dma);
		return -ENOMEM;
	}

	return 0;
}

struct cdnsp_input_control_ctx
	*cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
{
	if (ctx->type != CDNSP_CTX_TYPE_INPUT)
		return NULL;

	return (struct cdnsp_input_control_ctx *)ctx->bytes;
}

struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
{
	if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
		return (struct cdnsp_slot_ctx *)ctx->bytes;

	return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
}

struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
				      unsigned int ep_index)
{
	/* Increment ep index by the offset of the start of the ep ctx array. */
	ep_index++;
	if (ctx->type == CDNSP_CTX_TYPE_INPUT)
		ep_index++;

	return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
}
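
/*
 * Layout sketch (illustrative): a device (output) context is ordered
 * [slot][ep0][ep1]..., while an input context carries an extra input
 * control context up front: [input control][slot][ep0][ep1].... Assuming a
 * 64-byte context size, cdnsp_get_ep_ctx(ctx, 0) therefore resolves to
 * ctx->bytes + 1 * 64 for a device context and ctx->bytes + 2 * 64 for an
 * input context, which is exactly what the increments above compute.
 */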

static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
				  struct cdnsp_ep *pep)
{
	dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
		      pep->stream_info.ctx_array_dma);
}

/* The number of stream context array entries must be a power of 2. */
static struct cdnsp_stream_ctx
	*cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	size_t size = sizeof(struct cdnsp_stream_ctx) *
		      pep->stream_info.num_stream_ctxs;

	if (size > CDNSP_CTX_SIZE)
		return NULL;

	/*
	 * The driver intentionally uses the device_pool to allocate the
	 * stream context array. The pool's 2048-byte entry size gives us
	 * room for 128 entries.
	 */
	return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
			       &pep->stream_info.ctx_array_dma);
}

struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
{
	if (pep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&pep->stream_info.trb_address_map,
					 address >> TRB_SEGMENT_SHIFT);

	return pep->ring;
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.
 * The number of requested streams includes stream 0, which cannot be used by
 * the driver.
 *
 * The number of stream contexts in the stream context array may be bigger
 * than the number of streams the driver wants to use. This is because the
 * number of stream context array entries must be a power of two.
 */
int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
			    struct cdnsp_ep *pep,
			    unsigned int num_stream_ctxs,
			    unsigned int num_streams)
{
	struct cdnsp_stream_info *stream_info;
	struct cdnsp_ring *cur_ring;
	u32 cur_stream;
	u64 addr;
	int ret;
	int mps;

	stream_info = &pep->stream_info;
	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kcalloc(num_streams,
					    sizeof(struct cdnsp_ring *),
					    GFP_ATOMIC);
	if (!stream_info->stream_rings)
		return -ENOMEM;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
	if (!stream_info->stream_ctx_array)
		goto cleanup_stream_rings;

	memset(stream_info->stream_ctx_array, 0,
	       sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
	mps = usb_endpoint_maxp(pep->endpoint.desc);

	/*
	 * Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
					    GFP_ATOMIC);
		stream_info->stream_rings[cur_stream] = cur_ring;

		if (!cur_ring)
			goto cleanup_rings;

		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;

		/* Set deq ptr, cycle bit, and stream context type. */
		addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
		       cur_ring->cycle_state;

		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);

		trace_cdnsp_set_stream_ring(cur_ring);

		ret = cdnsp_update_stream_mapping(cur_ring);
		if (ret)
			goto cleanup_rings;
	}

	return 0;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			cdnsp_ring_free(pdev, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}

cleanup_stream_rings:
	kfree(pep->stream_info.stream_rings);

	return -ENOMEM;
}

/* Frees all stream contexts associated with the endpoint. */
static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep)
{
	struct cdnsp_stream_info *stream_info = &pep->stream_info;
	struct cdnsp_ring *cur_ring;
	int cur_stream;

	if (!(pep->ep_state & EP_HAS_STREAMS))
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
	     cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			cdnsp_ring_free(pdev, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}

	if (stream_info->stream_ctx_array)
		cdnsp_free_stream_ctx(pdev, pep);

	kfree(stream_info->stream_rings);
	pep->ep_state &= ~EP_HAS_STREAMS;
}

/* All the cdnsp_tds in the ring's TD list should be freed at this point. */
static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
{
	pdev->dcbaa->dev_context_ptrs[1] = 0;

	cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);

	if (pdev->in_ctx.bytes)
		dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
			      pdev->in_ctx.dma);

	if (pdev->out_ctx.bytes)
		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
			      pdev->out_ctx.dma);

	pdev->in_ctx.bytes = NULL;
	pdev->out_ctx.bytes = NULL;
}

static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev)
{
	int ret;

	ret = cdnsp_init_device_ctx(pdev);
	if (ret)
		return ret;

	/* Allocate endpoint 0 ring. */
	pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC);
	if (!pdev->eps[0].ring) {
		/* Without this, a failed allocation would return 0. */
		ret = -ENOMEM;
		goto fail;
	}

	/* Point to output device context in dcbaa. */
	pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
	pdev->cmd.in_ctx = &pdev->in_ctx;

	trace_cdnsp_alloc_priv_device(pdev);
	return 0;
fail:
	dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
		      pdev->out_ctx.dma);
	dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
		      pdev->in_ctx.dma);

	return ret;
}

void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
{
	struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
	struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
	dma_addr_t dma;

	dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
	ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
}

/* Set up a controller private device for a Set Address command. */
int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	struct cdnsp_ep_ctx *ep0_ctx;
	u32 max_packets, port;

	ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context. */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

	switch (pdev->gadget.speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	default:
		/* Speed was not set; this shouldn't happen. */
		return -EINVAL;
	}

	port = DEV_PORT(pdev->active_port->port_num);
	slot_ctx->dev_port |= cpu_to_le32(port);
	slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
					   DEV_ADDR_MASK));
	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
				   pdev->eps[0].ring->cycle_state);

	trace_cdnsp_setup_addressable_priv_device(pdev);

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
						  struct cdnsp_ep *pep)
{
	unsigned int interval;

	interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
	if (interval != pep->endpoint.desc->bInterval - 1)
		dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
			 pep->name, 1 << interval,
			 g->speed == USB_SPEED_FULL ? "" : "micro");

	/*
	 * Full speed isoc endpoints specify interval in frames,
	 * not microframes. We are using microframes everywhere,
	 * so adjust accordingly.
	 */
	if (g->speed == USB_SPEED_FULL)
		interval += 3;	/* 1 frame = 2^3 uframes */

	/* Controller handles only up to 512ms (2^12). */
	if (interval > 12)
		interval = 12;

	return interval;
}
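
/*
 * Worked example (illustrative): a high-speed interrupt endpoint with
 * bInterval == 4 yields interval == 3, i.e. a service period of
 * 2^3 * 125us == 1ms. A full-speed isoc endpoint with bInterval == 1
 * yields interval == 0 + 3 == 3 after the frames-to-microframes
 * adjustment, which is the same 1ms period expressed in microframes.
 */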

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to the nearest power of 2.
 */
static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
						  struct cdnsp_ep *pep,
						  unsigned int desc_interval,
						  unsigned int min_exponent,
						  unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	return clamp_val(interval, min_exponent, max_exponent);
}
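
/*
 * Worked example (illustrative): desc_interval == 9 microframes gives
 * fls(9) - 1 == 3, i.e. 2^3 == 8 microframes, the largest power of two
 * not exceeding the requested interval, before clamping to the
 * [min_exponent, max_exponent] range.
 */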

/*
 * Return the polling interval.
 *
 * The polling interval is expressed in "microframes". If the controller's
 * Interval field is set to N, it will service the endpoint every
 * 2^(Interval) * 125us.
 */
static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
						struct cdnsp_ep *pep)
{
	unsigned int interval = 0;

	switch (g->speed) {
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
		    usb_endpoint_xfer_isoc(pep->endpoint.desc))
			interval = cdnsp_parse_exponent_interval(g, pep);
		break;
	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
			interval = cdnsp_parse_exponent_interval(g, pep);
		} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
			interval = pep->endpoint.desc->bInterval << 3;
			interval = cdnsp_microframes_to_exponent(g, pep,
								 interval,
								 3, 10);
		}

		break;
	default:
		WARN_ON(1);
	}

	return interval;
}

/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc
 * eps. High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
{
	if (g->speed < USB_SPEED_SUPER ||
	    !usb_endpoint_xfer_isoc(pep->endpoint.desc))
		return 0;

	return pep->endpoint.comp_desc->bmAttributes;
}

static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
					struct cdnsp_ep *pep)
{
	/* SuperSpeed and SuperSpeedPlus have max burst in the ep companion desc. */
	if (g->speed >= USB_SPEED_SUPER)
		return pep->endpoint.comp_desc->bMaxBurst;

	if (g->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
	     usb_endpoint_xfer_int(pep->endpoint.desc)))
		return (usb_endpoint_maxp(pep->endpoint.desc) & 0x1800) >> 11;

	return 0;
}
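
/*
 * Worked example (illustrative): a high-speed isoc endpoint with
 * wMaxPacketSize == 0x1400 has (0x1400 & 0x1800) >> 11 == 2 in bits 12:11,
 * i.e. up to two additional (three total) transactions per microframe.
 */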

static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
{
	int in;

	in = usb_endpoint_dir_in(desc);

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}

	return 0;
}

/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
				      struct cdnsp_ep *pep)
{
	int max_packet;
	int max_burst;

	/* Only applies for interrupt or isochronous endpoints. */
	if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
	    usb_endpoint_xfer_bulk(pep->endpoint.desc))
		return 0;

	/* SuperSpeedPlus isoc ep sending over 48K per ESIT. */
	if (g->speed >= USB_SPEED_SUPER_PLUS &&
	    USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
		return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus isoc ep with less than 48K per ESIT. */
	else if (g->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);

	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
	max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);

	/* A 0 in max burst means 1 transfer per ESIT. */
	return max_packet * max_burst;
}
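
/*
 * Worked example (illustrative): a high-speed isoc endpoint with
 * max_packet == 1024 and usb_endpoint_maxp_mult() == 3 has a maximum ESIT
 * payload of 1024 * 3 == 3072 bytes per service interval.
 */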

int cdnsp_endpoint_init(struct cdnsp_device *pdev,
			struct cdnsp_ep *pep,
			gfp_t mem_flags)
{
	enum cdnsp_ring_type ring_type;
	struct cdnsp_ep_ctx *ep_ctx;
	unsigned int err_count = 0;
	unsigned int avg_trb_len;
	unsigned int max_packet;
	unsigned int max_burst;
	unsigned int interval;
	u32 max_esit_payload;
	unsigned int mult;
	u32 endpoint_type;
	int ret;

	ep_ctx = pep->in_ctx;

	endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(pep->endpoint.desc);

	/*
	 * Get values to fill the endpoint context, mostly from the ep
	 * descriptor. The average TRB buffer length for bulk endpoints is
	 * unclear as we have no clue about the scatter-gather list entry
	 * size. For Isoc and Int, set it to the max available.
	 */
	max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
	interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
	mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
	max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
	avg_trb_len = max_esit_payload;

	/* Allow 3 retries for everything but isoc, set CErr = 3. */
	if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
		err_count = 3;
	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
	    pdev->gadget.speed == USB_SPEED_HIGH)
		max_packet = 512;
	/* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
	if (usb_endpoint_xfer_control(pep->endpoint.desc))
		avg_trb_len = 8;

	/* Set up the endpoint ring. */
	pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
	if (!pep->ring)
		return -ENOMEM;

	pep->skip = false;

	/* Fill the endpoint context. */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				EP_INTERVAL(interval) | EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
				ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
				  pep->ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				EP_AVG_TRB_LENGTH(avg_trb_len));

	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
	    pdev->gadget.speed > USB_SPEED_HIGH) {
		ret = cdnsp_alloc_streams(pdev, pep);
		if (ret < 0)
			return ret;
	}

	return 0;
}

void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	pep->in_ctx->ep_info = 0;
	pep->in_ctx->ep_info2 = 0;
	pep->in_ctx->deq = 0;
	pep->in_ctx->tx_info = 0;
}

static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
			    struct cdnsp_ring *evt_ring,
			    struct cdnsp_erst *erst)
{
	struct cdnsp_erst_entry *entry;
	struct cdnsp_segment *seg;
	unsigned int val;
	size_t size;

	size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_alloc_coherent(pdev->dev, size,
					   &erst->erst_dma_addr, GFP_KERNEL);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}

static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
{
	size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
	struct device *dev = pdev->dev;

	if (erst->entries)
		dma_free_coherent(dev, size, erst->entries,
				  erst->erst_dma_addr);

	erst->entries = NULL;
}

void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
{
	struct device *dev = pdev->dev;

	cdnsp_free_priv_device(pdev);
	cdnsp_free_erst(pdev, &pdev->erst);

	if (pdev->event_ring)
		cdnsp_ring_free(pdev, pdev->event_ring);

	pdev->event_ring = NULL;

	if (pdev->cmd_ring)
		cdnsp_ring_free(pdev, pdev->cmd_ring);

	pdev->cmd_ring = NULL;

	dma_pool_destroy(pdev->segment_pool);
	pdev->segment_pool = NULL;
	dma_pool_destroy(pdev->device_pool);
	pdev->device_pool = NULL;

	dma_free_coherent(dev, sizeof(*pdev->dcbaa),
			  pdev->dcbaa, pdev->dcbaa->dma);

	pdev->dcbaa = NULL;

	pdev->usb2_port.exist = 0;
	pdev->usb3_port.exist = 0;
	pdev->usb2_port.port_num = 0;
	pdev->usb3_port.port_num = 0;
	pdev->active_port = NULL;
}

static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
{
	dma_addr_t deq;
	u64 temp;

	deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
				    pdev->event_ring->dequeue);

	/* Update controller event ring dequeue pointer. */
	temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;

	/*
	 * Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;

	cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
		       &pdev->ir_set->erst_dequeue);
}

static void cdnsp_add_in_port(struct cdnsp_device *pdev,
			      struct cdnsp_port *port,
			      __le32 __iomem *addr)
{
	u32 temp, port_offset, port_count;

	temp = readl(addr);
	port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
	port->min_rev = CDNSP_EXT_PORT_MINOR(temp);

	/* Port offset and count are in the third dword. */
	temp = readl(addr + 2);
	port_offset = CDNSP_EXT_PORT_OFF(temp);
	port_count = CDNSP_EXT_PORT_COUNT(temp);

	trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev);

	port->port_num = port_offset;
	port->exist = 1;
}

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that specify what speeds each port is supposed to be.
 */
static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
{
	void __iomem *base;
	u32 offset;
	int i;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, 0,
					 EXT_CAP_CFG_DEV_20PORT_CAP_ID);
	pdev->port20_regs = base + offset;

	offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
	pdev->port3x_regs = base + offset;

	offset = 0;
	base = &pdev->cap_regs->hc_capbase;

	/* The driver expects at most two extended protocol capabilities. */
	for (i = 0; i < 2; i++) {
		u32 temp;

		offset = cdnsp_find_next_ext_cap(base, offset,
						 EXT_CAPS_PROTOCOL);
		temp = readl(base + offset);

		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
		    !pdev->usb3_port.port_num)
			cdnsp_add_in_port(pdev, &pdev->usb3_port,
					  base + offset);

		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
		    !pdev->usb2_port.port_num)
			cdnsp_add_in_port(pdev, &pdev->usb2_port,
					  base + offset);
	}

	if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
		dev_err(pdev->dev, "Error: Only one port detected\n");
		return -ENODEV;
	}

	trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports.");

	pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *)
			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
				(pdev->usb2_port.port_num - 1));

	pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *)
			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
				(pdev->usb3_port.port_num - 1));

	return 0;
}

/*
 * Initialize memory for CDNSP (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts, set up a command ring segment and create the event
 * ring (one for now).
 */
int cdnsp_mem_init(struct cdnsp_device *pdev)
{
	struct device *dev = pdev->dev;
	int ret = -ENOMEM;
	unsigned int val;
	dma_addr_t dma;
	u32 page_size;
	u64 val_64;

	/*
	 * Use 4K pages, since that's common and the minimum the
	 * controller supports.
	 */
	page_size = 1 << 12;

	val = readl(&pdev->op_regs->config_reg);
	val |= ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E;
	writel(val, &pdev->op_regs->config_reg);

	/*
	 * The device context base address array must be physically
	 * contiguous and 64-byte (cache line) aligned.
	 */
	pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
					 &dma, GFP_KERNEL);
	if (!pdev->dcbaa)
		return -ENOMEM;

	pdev->dcbaa->dma = dma;

	cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool. The ring must be a contiguous
	 * structure comprised of TRBs. The TRBs must be 16-byte aligned;
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of DMA addresses in the trb_address_map radix tree
	 * needs TRB_SEGMENT_SIZE alignment, so the driver picks the greater
	 * alignment requirement.
	 */
	pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
					     TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
					     page_size);
	if (!pdev->segment_pool)
		goto release_dcbaa;

	pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
					    CDNSP_CTX_SIZE, 64, page_size);
	if (!pdev->device_pool)
		goto destroy_segment_pool;

	/* Set up the command ring to have one segment for now. */
	pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
	if (!pdev->cmd_ring)
		goto destroy_device_pool;

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);

	val = readl(&pdev->cap_regs->db_off);
	val &= DBOFF_MASK;
	pdev->dba = (void __iomem *)pdev->cap_regs + val;

	/* Set ir_set to interrupt register set 0. */
	pdev->ir_set = &pdev->run_regs->ir_set[0];

	/*
	 * Event ring setup: allocate a normal ring, but also set up
	 * the event ring segment table (ERST).
	 */
	pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
					    0, GFP_KERNEL);
	if (!pdev->event_ring)
		goto free_cmd_ring;

	ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
	if (ret)
		goto free_event_ring;

	/* Set ERST count with the number of entries in the segment table. */
	val = readl(&pdev->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	writel(val, &pdev->ir_set->erst_size);

	/* Set the segment table base address. */
	val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
	cdnsp_write_64(val_64, &pdev->ir_set->erst_base);

	/* Set the event ring dequeue address. */
	cdnsp_set_event_deq(pdev);

	ret = cdnsp_setup_port_arrays(pdev);
	if (ret)
		goto free_erst;

	ret = cdnsp_alloc_priv_device(pdev);
	if (ret) {
		dev_err(pdev->dev,
			"Could not allocate cdnsp_device data structures\n");
		goto free_erst;
	}

	return 0;

free_erst:
	cdnsp_free_erst(pdev, &pdev->erst);
free_event_ring:
	cdnsp_ring_free(pdev, pdev->event_ring);
free_cmd_ring:
	cdnsp_ring_free(pdev, pdev->cmd_ring);
destroy_device_pool:
	dma_pool_destroy(pdev->device_pool);
destroy_segment_pool:
	dma_pool_destroy(pdev->segment_pool);
release_dcbaa:
	dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
			  pdev->dcbaa->dma);

	cdnsp_reset(pdev);

	return ret;
}