linux/drivers/usb/cdns3/cdnsp-ring.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cadence CDNSP DRD Driver.
   4 *
   5 * Copyright (C) 2020 Cadence.
   6 *
   7 * Author: Pawel Laszczak <pawell@cadence.com>
   8 *
   9 * Code based on Linux XHCI driver.
  10 * Origin: Copyright (C) 2008 Intel Corp
  11 */
  12
  13/*
  14 * Ring initialization rules:
  15 * 1. Each segment is initialized to zero, except for link TRBs.
  16 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
  17 *    Consumer Cycle State (CCS), depending on ring function.
  18 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  19 *
  20 * Ring behavior rules:
  21 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
  22 *    least one free TRB in the ring. This is useful if you want to turn that
  23 *    into a link TRB and expand the ring.
  24 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  25 *    link TRB, then load the pointer with the address in the link TRB. If the
  26 *    link TRB had its toggle bit set, you may need to update the ring cycle
  27 *    state (see cycle bit rules). You may have to do this multiple times
  28 *    until you reach a non-link TRB.
  29 * 3. A ring is full if enqueue++ (for the definition of increment above)
  30 *    equals the dequeue pointer.
  31 *
  32 * Cycle bit rules:
  33 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  34 *    in a link TRB, it must toggle the ring cycle state.
  35 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  36 *    in a link TRB, it must toggle the ring cycle state.
  37 *
  38 * Producer rules:
  39 * 1. Check if ring is full before you enqueue.
  40 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  41 *    Update enqueue pointer between each write (which may update the ring
  42 *    cycle state).
   43 * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
   44 *    command and endpoint rings. If the controller is the producer for the
   45 *    event ring, it generates an interrupt per interrupt moderation rules.
  46 *
  47 * Consumer rules:
  48 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
  49 *    the TRB is owned by the consumer.
  50 * 2. Update dequeue pointer (which may update the ring cycle state) and
  51 *    continue processing TRBs until you reach a TRB which is not owned by you.
  52 * 3. Notify the producer. SW is the consumer for the event ring, and it
  53 *    updates event ring dequeue pointer. Controller is the consumer for the
  54 *    command and endpoint rings; it generates events on the event ring
  55 *    for these.
  56 */
  57
  58#include <linux/scatterlist.h>
  59#include <linux/dma-mapping.h>
  60#include <linux/delay.h>
  61#include <linux/slab.h>
  62#include <linux/irq.h>
  63
  64#include "cdnsp-trace.h"
  65#include "cdnsp-gadget.h"
  66
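/*
 * Editor's sketch (hedged, not part of the original driver): consumer rule 1
 * from the comment block above, expressed with the same TRB_CYCLE test that
 * cdnsp_handle_event() performs at the bottom of this file. The helper name
 * is hypothetical.
 */
static inline bool cdnsp_trb_owned_by_consumer(struct cdnsp_ring *ring,
                                               union cdnsp_trb *trb)
{
        u32 cycle = le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE;

        /* The TRB belongs to the consumer while its cycle bit matches CCS. */
        return cycle == ring->cycle_state;
}
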
  67/*
  68 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  69 * address of the TRB.
  70 */
  71dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
  72                                 union cdnsp_trb *trb)
  73{
  74        unsigned long segment_offset = trb - seg->trbs;
  75
  76        if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
  77                return 0;
  78
  79        return seg->dma + (segment_offset * sizeof(*trb));
  80}
  81
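/*
 * Editor's note: a typical use of cdnsp_trb_virt_to_dma() is mapping a ring
 * pointer back to its bus address, e.g. (hypothetical caller):
 *
 *	dma_addr_t dma = cdnsp_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
 *
 * A zero return means the TRB does not belong to the given segment.
 */
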
  82static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
  83{
  84        return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
  85}
  86
  87static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
  88{
  89        return TRB_TYPE_LINK_LE32(trb->link.control);
  90}
  91
  92bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
  93{
  94        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
  95}
  96
  97bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
  98                            struct cdnsp_segment *seg,
  99                            union cdnsp_trb *trb)
 100{
 101        return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
 102}
 103
 104static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
 105{
 106        return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 107}
 108
 109static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
 110{
 111        if (cdnsp_trb_is_link(trb)) {
 112                /* Unchain chained link TRBs. */
 113                trb->link.control &= cpu_to_le32(~TRB_CHAIN);
 114        } else {
 115                trb->generic.field[0] = 0;
 116                trb->generic.field[1] = 0;
 117                trb->generic.field[2] = 0;
 118                /* Preserve only the cycle bit of this TRB. */
 119                trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
 120                trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
 121        }
 122}
 123
 124/*
 125 * Updates trb to point to the next TRB in the ring, and updates seg if the next
 126 * TRB is in a new segment. This does not skip over link TRBs, and it does not
  127 * affect the ring dequeue or enqueue pointers.
 128 */
 129static void cdnsp_next_trb(struct cdnsp_device *pdev,
 130                           struct cdnsp_ring *ring,
 131                           struct cdnsp_segment **seg,
 132                           union cdnsp_trb **trb)
 133{
 134        if (cdnsp_trb_is_link(*trb)) {
 135                *seg = (*seg)->next;
 136                *trb = ((*seg)->trbs);
 137        } else {
 138                (*trb)++;
 139        }
 140}
 141
 142/*
 143 * See Cycle bit rules. SW is the consumer for the event ring only.
 144 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 145 */
 146void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
 147{
  148        /* Event ring doesn't have link TRBs; check for the last TRB. */
 149        if (ring->type == TYPE_EVENT) {
 150                if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
 151                        ring->dequeue++;
 152                        goto out;
 153                }
 154
 155                if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
 156                        ring->cycle_state ^= 1;
 157
 158                ring->deq_seg = ring->deq_seg->next;
 159                ring->dequeue = ring->deq_seg->trbs;
 160                goto out;
 161        }
 162
 163        /* All other rings have link trbs. */
 164        if (!cdnsp_trb_is_link(ring->dequeue)) {
 165                ring->dequeue++;
 166                ring->num_trbs_free++;
 167        }
 168        while (cdnsp_trb_is_link(ring->dequeue)) {
 169                ring->deq_seg = ring->deq_seg->next;
 170                ring->dequeue = ring->deq_seg->trbs;
 171        }
 172out:
 173        trace_cdnsp_inc_deq(ring);
 174}
 175
 176/*
 177 * See Cycle bit rules. SW is the consumer for the event ring only.
 178 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 179 *
 180 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 181 * chain bit is set), then set the chain bit in all the following link TRBs.
 182 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 183 * have their chain bit cleared (so that each Link TRB is a separate TD).
 184 *
 185 * @more_trbs_coming:   Will you enqueue more TRBs before ringing the doorbell.
 186 */
 187static void cdnsp_inc_enq(struct cdnsp_device *pdev,
 188                          struct cdnsp_ring *ring,
 189                          bool more_trbs_coming)
 190{
 191        union cdnsp_trb *next;
 192        u32 chain;
 193
 194        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 195
  196        /* If this is not an event ring, there is one less usable TRB. */
 197        if (!cdnsp_trb_is_link(ring->enqueue))
 198                ring->num_trbs_free--;
 199        next = ++(ring->enqueue);
 200
  201        /* Update the enqueue pointer further if that was a link TRB. */
 202        while (cdnsp_trb_is_link(next)) {
 203                /*
 204                 * If the caller doesn't plan on enqueuing more TDs before
 205                 * ringing the doorbell, then we don't want to give the link TRB
 206                 * to the hardware just yet. We'll give the link TRB back in
 207                 * cdnsp_prepare_ring() just before we enqueue the TD at the
 208                 * top of the ring.
 209                 */
 210                if (!chain && !more_trbs_coming)
 211                        break;
 212
 213                next->link.control &= cpu_to_le32(~TRB_CHAIN);
 214                next->link.control |= cpu_to_le32(chain);
 215
 216                /* Give this link TRB to the hardware */
 217                wmb();
 218                next->link.control ^= cpu_to_le32(TRB_CYCLE);
 219
 220                /* Toggle the cycle bit after the last ring segment. */
 221                if (cdnsp_link_trb_toggles_cycle(next))
 222                        ring->cycle_state ^= 1;
 223
 224                ring->enq_seg = ring->enq_seg->next;
 225                ring->enqueue = ring->enq_seg->trbs;
 226                next = ring->enqueue;
 227        }
 228
 229        trace_cdnsp_inc_enq(ring);
 230}
 231
 232/*
 233 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 234 * enqueue pointer will not advance into dequeue segment.
 235 */
 236static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
 237                               struct cdnsp_ring *ring,
 238                               unsigned int num_trbs)
 239{
 240        int num_trbs_in_deq_seg;
 241
 242        if (ring->num_trbs_free < num_trbs)
 243                return false;
 244
 245        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
 246                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
 247
 248                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
 249                        return false;
 250        }
 251
 252        return true;
 253}
 254
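/*
 * Editor's sketch (hedged): how a queuing path consults the helper above
 * before writing any TRBs; cdnsp_prepare_ring() is the real caller, the
 * wrapper below is hypothetical and only illustrates the contract.
 */
static inline int cdnsp_check_room(struct cdnsp_device *pdev,
                                   struct cdnsp_ring *ring,
                                   unsigned int num_trbs)
{
        /* Producer rule 1: check for space before enqueuing anything. */
        if (!cdnsp_room_on_ring(pdev, ring, num_trbs))
                return -ENOMEM;

        return 0;
}
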
 255/*
 256 * Workaround for L1: controller has issue with resuming from L1 after
 257 * setting doorbell for endpoint during L1 state. This function forces
 258 * resume signal in such case.
 259 */
 260static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
 261{
 262        if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
 263                cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
 264}
 265
 266/* Ring the doorbell after placing a command on the ring. */
 267void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
 268{
 269        writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
 270}
 271
 272/*
 273 * Ring the doorbell after placing a transfer on the ring.
 274 * Returns true if doorbell was set, otherwise false.
 275 */
 276static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
 277                                   struct cdnsp_ep *pep,
 278                                   unsigned int stream_id)
 279{
 280        __le32 __iomem *reg_addr = &pdev->dba->ep_db;
 281        unsigned int ep_state = pep->ep_state;
 282        unsigned int db_value;
 283
 284        /*
  285         * Don't ring the doorbell for this endpoint if the endpoint is
  286         * halted or disabled.
 287         */
 288        if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
 289                return false;
 290
  291        /* For stream-capable endpoints the doorbell can be rung only twice. */
 292        if (pep->ep_state & EP_HAS_STREAMS) {
 293                if (pep->stream_info.drbls_count >= 2)
 294                        return false;
 295
 296                pep->stream_info.drbls_count++;
 297        }
 298
 299        pep->ep_state &= ~EP_STOPPED;
 300
 301        if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
 302            !pdev->ep0_expect_in)
 303                db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
 304        else
 305                db_value = DB_VALUE(pep->idx, stream_id);
 306
 307        trace_cdnsp_tr_drbl(pep, stream_id);
 308
 309        writel(db_value, reg_addr);
 310
 311        cdnsp_force_l0_go(pdev);
 312
 313        /* Doorbell was set. */
 314        return true;
 315}
 316
 317/*
 318 * Get the right ring for the given pep and stream_id.
 319 * If the endpoint supports streams, boundary check the USB request's stream ID.
 320 * If the endpoint doesn't support streams, return the singular endpoint ring.
 321 */
 322static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
 323                                                  struct cdnsp_ep *pep,
 324                                                  unsigned int stream_id)
 325{
 326        if (!(pep->ep_state & EP_HAS_STREAMS))
 327                return pep->ring;
 328
 329        if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
 330                dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
 331                        pep->name, stream_id);
 332                return NULL;
 333        }
 334
 335        return pep->stream_info.stream_rings[stream_id];
 336}
 337
 338static struct cdnsp_ring *
 339        cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
 340                                       struct cdnsp_request *preq)
 341{
 342        return cdnsp_get_transfer_ring(pdev, preq->pep,
 343                                       preq->request.stream_id);
 344}
 345
 346/* Ring the doorbell for any rings with pending requests. */
 347void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
 348                                          struct cdnsp_ep *pep)
 349{
 350        struct cdnsp_stream_info *stream_info;
 351        unsigned int stream_id;
 352        int ret;
 353
 354        if (pep->ep_state & EP_DIS_IN_RROGRESS)
 355                return;
 356
  357        /* A ring has a pending request if its TD list is not empty. */
 358        if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
 359                if (pep->ring && !list_empty(&pep->ring->td_list))
 360                        cdnsp_ring_ep_doorbell(pdev, pep, 0);
 361                return;
 362        }
 363
 364        stream_info = &pep->stream_info;
 365
 366        for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
 367                struct cdnsp_td *td, *td_temp;
 368                struct cdnsp_ring *ep_ring;
 369
 370                if (stream_info->drbls_count >= 2)
 371                        return;
 372
 373                ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
 374                if (!ep_ring)
 375                        continue;
 376
 377                if (!ep_ring->stream_active || ep_ring->stream_rejected)
 378                        continue;
 379
 380                list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
 381                                         td_list) {
 382                        if (td->drbl)
 383                                continue;
 384
 385                        ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
 386                        if (ret)
 387                                td->drbl = 1;
 388                }
 389        }
 390}
 391
 392/*
  393 * Get the hw dequeue pointer the controller stopped on, either directly
  394 * from the endpoint context or, if streams are in use, from the stream
  395 * context. The low four bits of the returned hw_dequeue hold the cycle
  396 * state and, possibly, the stream context type.
 397 */
 398static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
 399                            unsigned int ep_index,
 400                            unsigned int stream_id)
 401{
 402        struct cdnsp_stream_ctx *st_ctx;
 403        struct cdnsp_ep *pep;
 404
  405        pep = &pdev->eps[ep_index];
 406
 407        if (pep->ep_state & EP_HAS_STREAMS) {
 408                st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
 409                return le64_to_cpu(st_ctx->stream_ring);
 410        }
 411
 412        return le64_to_cpu(pep->out_ctx->deq);
 413}
 414
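/*
 * Editor's note: callers must mask off the low bits of the returned value
 * before comparing it with TRB bus addresses, as cdnsp_remove_request()
 * does below:
 *
 *	hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
 *	hw_deq &= ~0xf;
 *
 * Bit 0 carries the cycle state (see cdnsp_find_new_dequeue_state()).
 */
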
 415/*
 416 * Move the controller endpoint ring dequeue pointer past cur_td.
 417 * Record the new state of the controller endpoint ring dequeue segment,
 418 * dequeue pointer, and new consumer cycle state in state.
 419 * Update internal representation of the ring's dequeue pointer.
 420 *
 421 * We do this in three jumps:
 422 *  - First we update our new ring state to be the same as when the
 423 *    controller stopped.
 424 *  - Then we traverse the ring to find the segment that contains
  425 *    the last TRB in the TD. We toggle the controller's new cycle state
 426 *    when we pass any link TRBs with the toggle cycle bit set.
 427 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 428 *    if we've moved it past a link TRB with the toggle cycle bit set.
 429 */
 430static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
 431                                         struct cdnsp_ep *pep,
 432                                         unsigned int stream_id,
 433                                         struct cdnsp_td *cur_td,
 434                                         struct cdnsp_dequeue_state *state)
 435{
 436        bool td_last_trb_found = false;
 437        struct cdnsp_segment *new_seg;
 438        struct cdnsp_ring *ep_ring;
 439        union cdnsp_trb *new_deq;
 440        bool cycle_found = false;
 441        u64 hw_dequeue;
 442
 443        ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
 444        if (!ep_ring)
 445                return;
 446
 447        /*
 448         * Dig out the cycle state saved by the controller during the
 449         * stop endpoint command.
 450         */
 451        hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
 452        new_seg = ep_ring->deq_seg;
 453        new_deq = ep_ring->dequeue;
 454        state->new_cycle_state = hw_dequeue & 0x1;
 455        state->stream_id = stream_id;
 456
 457        /*
 458         * We want to find the pointer, segment and cycle state of the new trb
 459         * (the one after current TD's last_trb). We know the cycle state at
 460         * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
 461         * found.
 462         */
 463        do {
 464                if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
 465                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
 466                        cycle_found = true;
 467
 468                        if (td_last_trb_found)
 469                                break;
 470                }
 471
 472                if (new_deq == cur_td->last_trb)
 473                        td_last_trb_found = true;
 474
 475                if (cycle_found && cdnsp_trb_is_link(new_deq) &&
 476                    cdnsp_link_trb_toggles_cycle(new_deq))
 477                        state->new_cycle_state ^= 0x1;
 478
 479                cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
 480
 481                /* Search wrapped around, bail out. */
 482                if (new_deq == pep->ring->dequeue) {
 483                        dev_err(pdev->dev,
 484                                "Error: Failed finding new dequeue state\n");
 485                        state->new_deq_seg = NULL;
 486                        state->new_deq_ptr = NULL;
 487                        return;
 488                }
 489
 490        } while (!cycle_found || !td_last_trb_found);
 491
 492        state->new_deq_seg = new_seg;
 493        state->new_deq_ptr = new_deq;
 494
 495        trace_cdnsp_new_deq_state(state);
 496}
 497
 498/*
 499 * flip_cycle means flip the cycle bit of all but the first and last TRB.
 500 * (The last TRB actually points to the ring enqueue pointer, which is not part
 501 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 502 */
 503static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
 504                             struct cdnsp_ring *ep_ring,
 505                             struct cdnsp_td *td,
 506                             bool flip_cycle)
 507{
 508        struct cdnsp_segment *seg = td->start_seg;
 509        union cdnsp_trb *trb = td->first_trb;
 510
 511        while (1) {
 512                cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
 513
 514                /* flip cycle if asked to */
 515                if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
 516                        trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
 517
 518                if (trb == td->last_trb)
 519                        break;
 520
 521                cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
 522        }
 523}
 524
 525/*
 526 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 527 * at end_trb, which may be in another segment. If the suspect DMA address is a
 528 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 529 * returns 0.
 530 */
 531static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
 532                                             struct cdnsp_segment *start_seg,
 533                                             union cdnsp_trb *start_trb,
 534                                             union cdnsp_trb *end_trb,
 535                                             dma_addr_t suspect_dma)
 536{
 537        struct cdnsp_segment *cur_seg;
 538        union cdnsp_trb *temp_trb;
 539        dma_addr_t end_seg_dma;
 540        dma_addr_t end_trb_dma;
 541        dma_addr_t start_dma;
 542
 543        start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
 544        cur_seg = start_seg;
 545
 546        do {
 547                if (start_dma == 0)
 548                        return NULL;
 549
 550                temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
 551                /* We may get an event for a Link TRB in the middle of a TD */
 552                end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
 553                /* If the end TRB isn't in this segment, this is set to 0 */
 554                end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
 555
 556                trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
 557                                              end_trb_dma, cur_seg->dma,
 558                                              end_seg_dma);
 559
 560                if (end_trb_dma > 0) {
 561                        /*
 562                         * The end TRB is in this segment, so suspect should
 563                         * be here
 564                         */
 565                        if (start_dma <= end_trb_dma) {
 566                                if (suspect_dma >= start_dma &&
 567                                    suspect_dma <= end_trb_dma) {
 568                                        return cur_seg;
 569                                }
 570                        } else {
 571                                /*
 572                                 * Case for one segment with a
 573                                 * TD wrapped around to the top
 574                                 */
 575                                if ((suspect_dma >= start_dma &&
 576                                     suspect_dma <= end_seg_dma) ||
 577                                    (suspect_dma >= cur_seg->dma &&
 578                                     suspect_dma <= end_trb_dma)) {
 579                                        return cur_seg;
 580                                }
 581                        }
 582
 583                        return NULL;
 584                }
 585
 586                /* Might still be somewhere in this segment */
 587                if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
 588                        return cur_seg;
 589
 590                cur_seg = cur_seg->next;
 591                start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
 592        } while (cur_seg != start_seg);
 593
 594        return NULL;
 595}
 596
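/*
 * Editor's note: this helper is how the driver decides whether the
 * controller stopped inside the TD being cancelled, e.g. in
 * cdnsp_remove_request() below:
 *
 *	seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
 *			      cur_td->last_trb, hw_deq);
 *
 * A NULL result means the suspect DMA address lies outside the TD.
 */
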
 597static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
 598                                         struct cdnsp_ring *ring,
 599                                         struct cdnsp_td *td)
 600{
 601        struct cdnsp_segment *seg = td->bounce_seg;
 602        struct cdnsp_request *preq;
 603        size_t len;
 604
 605        if (!seg)
 606                return;
 607
 608        preq = td->preq;
 609
 610        trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
 611                                 seg->bounce_dma, 0);
 612
 613        if (!preq->direction) {
 614                dma_unmap_single(pdev->dev, seg->bounce_dma,
  615                                 ring->bounce_buf_len, DMA_TO_DEVICE);
 616                return;
 617        }
 618
 619        dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
 620                         DMA_FROM_DEVICE);
 621
  622        /* For IN transfers we need to copy the data from bounce to sg. */
 623        len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
 624                                   seg->bounce_buf, seg->bounce_len,
 625                                   seg->bounce_offs);
 626        if (len != seg->bounce_len)
 627                dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
 628                         len, seg->bounce_len);
 629
 630        seg->bounce_len = 0;
 631        seg->bounce_offs = 0;
 632}
 633
 634static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
 635                             struct cdnsp_ep *pep,
 636                             struct cdnsp_dequeue_state *deq_state)
 637{
 638        struct cdnsp_ring *ep_ring;
 639        int ret;
 640
 641        if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
 642                cdnsp_ring_doorbell_for_active_rings(pdev, pep);
 643                return 0;
 644        }
 645
 646        cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
 647        cdnsp_ring_cmd_db(pdev);
 648        ret = cdnsp_wait_for_cmd_compl(pdev);
 649
 650        trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
 651        trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);
 652
 653        /*
 654         * Update the ring's dequeue segment and dequeue pointer
 655         * to reflect the new position.
 656         */
 657        ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
 658
 659        if (cdnsp_trb_is_link(ep_ring->dequeue)) {
 660                ep_ring->deq_seg = ep_ring->deq_seg->next;
 661                ep_ring->dequeue = ep_ring->deq_seg->trbs;
 662        }
 663
 664        while (ep_ring->dequeue != deq_state->new_deq_ptr) {
 665                ep_ring->num_trbs_free++;
 666                ep_ring->dequeue++;
 667
 668                if (cdnsp_trb_is_link(ep_ring->dequeue)) {
 669                        if (ep_ring->dequeue == deq_state->new_deq_ptr)
 670                                break;
 671
 672                        ep_ring->deq_seg = ep_ring->deq_seg->next;
 673                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
 674                }
 675        }
 676
 677        /*
  678         * A timeout probably occurred while handling the Set Dequeue
  679         * Pointer command. This is critical; the controller will be stopped.
 680         */
 681        if (ret)
 682                return -ESHUTDOWN;
 683
 684        /* Restart any rings with pending requests */
 685        cdnsp_ring_doorbell_for_active_rings(pdev, pep);
 686
 687        return 0;
 688}
 689
 690int cdnsp_remove_request(struct cdnsp_device *pdev,
 691                         struct cdnsp_request *preq,
 692                         struct cdnsp_ep *pep)
 693{
 694        struct cdnsp_dequeue_state deq_state;
 695        struct cdnsp_td *cur_td = NULL;
 696        struct cdnsp_ring *ep_ring;
 697        struct cdnsp_segment *seg;
 698        int status = -ECONNRESET;
 699        int ret = 0;
 700        u64 hw_deq;
 701
 702        memset(&deq_state, 0, sizeof(deq_state));
 703
 704        trace_cdnsp_remove_request(pep->out_ctx);
 705        trace_cdnsp_remove_request_td(preq);
 706
 707        cur_td = &preq->td;
 708        ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
 709
 710        /*
 711         * If we stopped on the TD we need to cancel, then we have to
 712         * move the controller endpoint ring dequeue pointer past
 713         * this TD.
 714         */
 715        hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
 716        hw_deq &= ~0xf;
 717
 718        seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
 719                              cur_td->last_trb, hw_deq);
 720
 721        if (seg && (pep->ep_state & EP_ENABLED))
 722                cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
 723                                             cur_td, &deq_state);
 724        else
 725                cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
 726
 727        /*
 728         * The event handler won't see a completion for this TD anymore,
 729         * so remove it from the endpoint ring's TD list.
 730         */
 731        list_del_init(&cur_td->td_list);
 732        ep_ring->num_tds--;
 733        pep->stream_info.td_count--;
 734
  735        /*
  736         * While disconnecting, all endpoints will be disabled, so we don't
  737         * have to worry about updating the dequeue pointer.
  738         */
  739        if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING)
  740                status = -ESHUTDOWN;
  741        else
  742                ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
 743
 744        cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
 745        cdnsp_gadget_giveback(pep, cur_td->preq, status);
 746
 747        return ret;
 748}
 749
 750static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
 751{
 752        struct cdnsp_port *port = pdev->active_port;
 753        u8 old_port = 0;
 754
 755        if (port && port->port_num == port_id)
 756                return 0;
 757
 758        if (port)
 759                old_port = port->port_num;
 760
 761        if (port_id == pdev->usb2_port.port_num) {
 762                port = &pdev->usb2_port;
 763        } else if (port_id == pdev->usb3_port.port_num) {
  764                port = &pdev->usb3_port;
 765        } else {
 766                dev_err(pdev->dev, "Port event with invalid port ID %d\n",
 767                        port_id);
 768                return -EINVAL;
 769        }
 770
 771        if (port_id != old_port) {
 772                cdnsp_disable_slot(pdev);
 773                pdev->active_port = port;
 774                cdnsp_enable_slot(pdev);
 775        }
 776
 777        if (port_id == pdev->usb2_port.port_num)
 778                cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
 779        else
 780                writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
 781                       &pdev->usb3_port.regs->portpmsc);
 782
 783        return 0;
 784}
 785
 786static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
 787                                     union cdnsp_trb *event)
 788{
 789        struct cdnsp_port_regs __iomem *port_regs;
 790        u32 portsc, cmd_regs;
 791        bool port2 = false;
 792        u32 link_state;
 793        u32 port_id;
 794
 795        /* Port status change events always have a successful completion code */
 796        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
 797                dev_err(pdev->dev, "ERR: incorrect PSC event\n");
 798
 799        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
 800
 801        if (cdnsp_update_port_id(pdev, port_id))
 802                goto cleanup;
 803
 804        port_regs = pdev->active_port->regs;
 805
 806        if (port_id == pdev->usb2_port.port_num)
 807                port2 = true;
 808
 809new_event:
 810        portsc = readl(&port_regs->portsc);
 811        writel(cdnsp_port_state_to_neutral(portsc) |
 812               (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
 813
 814        trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);
 815
 816        pdev->gadget.speed = cdnsp_port_speed(portsc);
 817        link_state = portsc & PORT_PLS_MASK;
 818
 819        /* Port Link State change detected. */
 820        if ((portsc & PORT_PLC)) {
 821                if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING)  &&
 822                    link_state == XDEV_RESUME) {
 823                        cmd_regs = readl(&pdev->op_regs->command);
 824                        if (!(cmd_regs & CMD_R_S))
 825                                goto cleanup;
 826
 827                        if (DEV_SUPERSPEED_ANY(portsc)) {
 828                                cdnsp_set_link_state(pdev, &port_regs->portsc,
 829                                                     XDEV_U0);
 830
 831                                cdnsp_resume_gadget(pdev);
 832                        }
 833                }
 834
 835                if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
 836                    link_state == XDEV_U0) {
 837                        pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
 838
 839                        cdnsp_force_header_wakeup(pdev, 1);
 840                        cdnsp_ring_cmd_db(pdev);
 841                        cdnsp_wait_for_cmd_compl(pdev);
 842                }
 843
 844                if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
 845                    !DEV_SUPERSPEED_ANY(portsc))
 846                        cdnsp_resume_gadget(pdev);
 847
 848                if (link_state == XDEV_U3 &&  pdev->link_state != XDEV_U3)
 849                        cdnsp_suspend_gadget(pdev);
 850
 851                pdev->link_state = link_state;
 852        }
 853
 854        if (portsc & PORT_CSC) {
 855                /* Detach device. */
 856                if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
 857                        cdnsp_disconnect_gadget(pdev);
 858
 859                /* Attach device. */
 860                if (portsc & PORT_CONNECT) {
 861                        if (!port2)
 862                                cdnsp_irq_reset(pdev);
 863
 864                        usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
 865                }
 866        }
 867
 868        /* Port reset. */
 869        if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
 870                cdnsp_irq_reset(pdev);
 871                pdev->u1_allowed = 0;
 872                pdev->u2_allowed = 0;
 873                pdev->may_wakeup = 0;
 874        }
 875
  876        if (portsc & PORT_OCC) /* over-current change; bit assumed */
  877                dev_err(pdev->dev, "Port Over Current detected\n");
 878
 879        if (portsc & PORT_CEC)
 880                dev_err(pdev->dev, "Port Configure Error detected\n");
 881
 882        if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
 883                goto new_event;
 884
 885cleanup:
 886        cdnsp_inc_deq(pdev, pdev->event_ring);
 887}
 888
 889static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
 890                             struct cdnsp_td *td,
 891                             struct cdnsp_ring *ep_ring,
 892                             int *status)
 893{
 894        struct cdnsp_request *preq = td->preq;
 895
 896        /* if a bounce buffer was used to align this td then unmap it */
 897        cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
 898
 899        /*
 900         * If the controller said we transferred more data than the buffer
  901         * length, play it safe and say we didn't transfer anything.
 902         */
 903        if (preq->request.actual > preq->request.length) {
 904                preq->request.actual = 0;
 905                *status = 0;
 906        }
 907
 908        list_del_init(&td->td_list);
 909        ep_ring->num_tds--;
 910        preq->pep->stream_info.td_count--;
 911
 912        cdnsp_gadget_giveback(preq->pep, preq, *status);
 913}
 914
 915static void cdnsp_finish_td(struct cdnsp_device *pdev,
 916                            struct cdnsp_td *td,
 917                            struct cdnsp_transfer_event *event,
 918                            struct cdnsp_ep *ep,
 919                            int *status)
 920{
 921        struct cdnsp_ring *ep_ring;
 922        u32 trb_comp_code;
 923
 924        ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 925        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 926
 927        if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
 928            trb_comp_code == COMP_STOPPED ||
 929            trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
 930                /*
 931                 * The Endpoint Stop Command completion will take care of any
 932                 * stopped TDs. A stopped TD may be restarted, so don't update
 933                 * the ring dequeue pointer or take this TD off any lists yet.
 934                 */
 935                return;
 936        }
 937
 938        /* Update ring dequeue pointer */
 939        while (ep_ring->dequeue != td->last_trb)
 940                cdnsp_inc_deq(pdev, ep_ring);
 941
 942        cdnsp_inc_deq(pdev, ep_ring);
 943
 944        cdnsp_td_cleanup(pdev, td, ep_ring, status);
 945}
 946
 947/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
 948static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
 949                                 struct cdnsp_ring *ring,
 950                                 union cdnsp_trb *stop_trb)
 951{
 952        struct cdnsp_segment *seg = ring->deq_seg;
 953        union cdnsp_trb *trb = ring->dequeue;
 954        u32 sum;
 955
 956        for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
 957                if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
 958                        sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
 959        }
 960        return sum;
 961}
 962
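/*
 * Editor's sketch (hedged): the usual pattern for recovering the length of a
 * partially completed TD, mirroring what cdnsp_process_isoc_td() computes
 * below. ep_trb is the TRB the transfer event pointed at and remaining is
 * the residue reported in the event; the helper name is hypothetical.
 */
static inline u32 cdnsp_partial_td_length(struct cdnsp_device *pdev,
                                          struct cdnsp_ring *ep_ring,
                                          union cdnsp_trb *ep_trb,
                                          u32 remaining)
{
        u32 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));

        /* Sum the TRBs already consumed, plus what completed in ep_trb. */
        return cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
               ep_trb_len - remaining;
}
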
 963static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
 964                                    struct cdnsp_ep *pep,
 965                                    unsigned int stream_id,
 966                                    int start_cycle,
 967                                    struct cdnsp_generic_trb *start_trb)
 968{
 969        /*
 970         * Pass all the TRBs to the hardware at once and make sure this write
 971         * isn't reordered.
 972         */
 973        wmb();
 974
 975        if (start_cycle)
 976                start_trb->field[3] |= cpu_to_le32(start_cycle);
 977        else
 978                start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
 979
 980        if ((pep->ep_state & EP_HAS_STREAMS) &&
 981            !pep->stream_info.first_prime_det) {
 982                trace_cdnsp_wait_for_prime(pep, stream_id);
 983                return 0;
 984        }
 985
 986        return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
 987}
 988
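/*
 * Editor's note: in the function above, the start TRB's cycle bit is flipped
 * only after the wmb(), so the controller can never observe a half-written
 * TD; this is the producer-side counterpart of the ownership test sketched
 * near the top of this file.
 */
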
 989/*
 990 * Process control tds, update USB request status and actual_length.
 991 */
 992static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
 993                                  struct cdnsp_td *td,
 994                                  union cdnsp_trb *event_trb,
 995                                  struct cdnsp_transfer_event *event,
 996                                  struct cdnsp_ep *pep,
 997                                  int *status)
 998{
 999        struct cdnsp_ring *ep_ring;
1000        u32 remaining;
1001        u32 trb_type;
1002
1003        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
1004        ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1005        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1006
1007        /*
 1008         * If we are on the data stage, update the actual_length of the USB
 1009         * request and flag it as set so that it won't be overwritten in the
 1010         * event for the last TRB.
1011         */
1012        if (trb_type == TRB_DATA) {
1013                td->request_length_set = true;
1014                td->preq->request.actual = td->preq->request.length - remaining;
1015        }
1016
1017        /* at status stage */
1018        if (!td->request_length_set)
1019                td->preq->request.actual = td->preq->request.length;
1020
1021        if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
1022            pdev->three_stage_setup) {
1023                td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1024                                td_list);
1025                pdev->ep0_stage = CDNSP_STATUS_STAGE;
1026
1027                cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
1028                                         &td->last_trb->generic);
1029                return;
1030        }
1031
1032        cdnsp_finish_td(pdev, td, event, pep, status);
1033}
1034
1035/*
1036 * Process isochronous tds, update usb request status and actual_length.
1037 */
1038static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
1039                                  struct cdnsp_td *td,
1040                                  union cdnsp_trb *ep_trb,
1041                                  struct cdnsp_transfer_event *event,
1042                                  struct cdnsp_ep *pep,
1043                                  int status)
1044{
1045        struct cdnsp_request *preq = td->preq;
1046        u32 remaining, requested, ep_trb_len;
1047        bool sum_trbs_for_length = false;
1048        struct cdnsp_ring *ep_ring;
1049        u32 trb_comp_code;
1050        u32 td_length;
1051
1052        ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1053        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1054        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1055        ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
1056
1057        requested = preq->request.length;
1058
1059        /* handle completion code */
1060        switch (trb_comp_code) {
1061        case COMP_SUCCESS:
1062                preq->request.status = 0;
1063                break;
1064        case COMP_SHORT_PACKET:
1065                preq->request.status = 0;
1066                sum_trbs_for_length = true;
1067                break;
1068        case COMP_ISOCH_BUFFER_OVERRUN:
1069        case COMP_BABBLE_DETECTED_ERROR:
1070                preq->request.status = -EOVERFLOW;
1071                break;
1072        case COMP_STOPPED:
1073                sum_trbs_for_length = true;
1074                break;
1075        case COMP_STOPPED_SHORT_PACKET:
1076                /* field normally containing residue now contains transferred */
1077                preq->request.status  = 0;
1078                requested = remaining;
1079                break;
1080        case COMP_STOPPED_LENGTH_INVALID:
1081                requested = 0;
1082                remaining = 0;
1083                break;
1084        default:
1085                sum_trbs_for_length = true;
1086                preq->request.status = -1;
1087                break;
1088        }
1089
1090        if (sum_trbs_for_length) {
1091                td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
1092                td_length += ep_trb_len - remaining;
1093        } else {
1094                td_length = requested;
1095        }
1096
1097        td->preq->request.actual += td_length;
1098
1099        cdnsp_finish_td(pdev, td, event, pep, &status);
1100}
1101
1102static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
1103                               struct cdnsp_td *td,
1104                               struct cdnsp_transfer_event *event,
1105                               struct cdnsp_ep *pep,
1106                               int status)
1107{
1108        struct cdnsp_ring *ep_ring;
1109
1110        ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1111        td->preq->request.status = -EXDEV;
1112        td->preq->request.actual = 0;
1113
1114        /* Update ring dequeue pointer */
1115        while (ep_ring->dequeue != td->last_trb)
1116                cdnsp_inc_deq(pdev, ep_ring);
1117
1118        cdnsp_inc_deq(pdev, ep_ring);
1119
1120        cdnsp_td_cleanup(pdev, td, ep_ring, &status);
1121}
1122
1123/*
1124 * Process bulk and interrupt tds, update usb request status and actual_length.
1125 */
1126static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
1127                                       struct cdnsp_td *td,
1128                                       union cdnsp_trb *ep_trb,
1129                                       struct cdnsp_transfer_event *event,
1130                                       struct cdnsp_ep *ep,
1131                                       int *status)
1132{
1133        u32 remaining, requested, ep_trb_len;
1134        struct cdnsp_ring *ep_ring;
1135        u32 trb_comp_code;
1136
1137        ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1138        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1139        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1140        ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
1141        requested = td->preq->request.length;
1142
1143        switch (trb_comp_code) {
1144        case COMP_SUCCESS:
1145        case COMP_SHORT_PACKET:
1146                *status = 0;
1147                break;
1148        case COMP_STOPPED_SHORT_PACKET:
1149                td->preq->request.actual = remaining;
1150                goto finish_td;
1151        case COMP_STOPPED_LENGTH_INVALID:
1152                /* Stopped on ep trb with invalid length, exclude it. */
1153                ep_trb_len = 0;
1154                remaining = 0;
1155                break;
1156        }
1157
1158        if (ep_trb == td->last_trb)
1159                ep_trb_len = requested - remaining;
1160        else
1161                ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
1162                                                   ep_trb_len - remaining;
1163        td->preq->request.actual = ep_trb_len;
1164
1165finish_td:
1166        ep->stream_info.drbls_count--;
1167
1168        cdnsp_finish_td(pdev, td, event, ep, status);
1169}
1170
1171static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
1172                                 struct cdnsp_transfer_event *event)
1173{
1174        struct cdnsp_generic_trb *generic;
1175        struct cdnsp_ring *ep_ring;
1176        struct cdnsp_ep *pep;
1177        int cur_stream;
1178        int ep_index;
1179        int host_sid;
1180        int dev_sid;
1181
1182        generic = (struct cdnsp_generic_trb *)event;
1183        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1184        dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
1185        host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));
1186
1187        pep = &pdev->eps[ep_index];
1188
1189        if (!(pep->ep_state & EP_HAS_STREAMS))
1190                return;
1191
1192        if (host_sid == STREAM_PRIME_ACK) {
1193                pep->stream_info.first_prime_det = 1;
1194                for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
1195                    cur_stream++) {
1196                        ep_ring = pep->stream_info.stream_rings[cur_stream];
1197                        ep_ring->stream_active = 1;
1198                        ep_ring->stream_rejected = 0;
1199                }
1200        }
1201
1202        if (host_sid == STREAM_REJECTED) {
1203                struct cdnsp_td *td, *td_temp;
1204
1205                pep->stream_info.drbls_count--;
1206                ep_ring = pep->stream_info.stream_rings[dev_sid];
1207                ep_ring->stream_active = 0;
1208                ep_ring->stream_rejected = 1;
1209
1210                list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
1211                                         td_list) {
1212                        td->drbl = 0;
1213                }
1214        }
1215
1216        cdnsp_ring_doorbell_for_active_rings(pdev, pep);
1217}
1218
1219/*
1220 * If this function returns an error condition, it means it got a Transfer
1221 * event with a corrupted TRB DMA address or endpoint is disabled.
1222 */
1223static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
1224                                 struct cdnsp_transfer_event *event)
1225{
1226        const struct usb_endpoint_descriptor *desc;
1227        bool handling_skipped_tds = false;
1228        struct cdnsp_segment *ep_seg;
1229        struct cdnsp_ring *ep_ring;
1230        int status = -EINPROGRESS;
1231        union cdnsp_trb *ep_trb;
1232        dma_addr_t ep_trb_dma;
1233        struct cdnsp_ep *pep;
1234        struct cdnsp_td *td;
1235        u32 trb_comp_code;
1236        int invalidate;
1237        int ep_index;
1238
1239        invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
1240        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1241        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1242        ep_trb_dma = le64_to_cpu(event->buffer);
1243
1244        pep = &pdev->eps[ep_index];
1245        ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1246
1247        /*
 1248         * If the device is disconnected then all requests will be dequeued
 1249         * by the upper layers as part of the disconnect sequence.
 1250         * We don't want to handle such events, to avoid racing.
1251         */
1252        if (invalidate || !pdev->gadget.connected)
1253                goto cleanup;
1254
1255        if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
1256                trace_cdnsp_ep_disabled(pep->out_ctx);
1257                goto err_out;
1258        }
1259
 1260        /* Some transfer events don't always point to a TRB. */
1261        if (!ep_ring) {
1262                switch (trb_comp_code) {
1263                case COMP_INVALID_STREAM_TYPE_ERROR:
1264                case COMP_INVALID_STREAM_ID_ERROR:
1265                case COMP_RING_UNDERRUN:
1266                case COMP_RING_OVERRUN:
1267                        goto cleanup;
1268                default:
1269                        dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
1270                                pep->name);
1271                        goto err_out;
1272                }
1273        }
1274
1275        /* Look for some error cases that need special treatment. */
1276        switch (trb_comp_code) {
1277        case COMP_BABBLE_DETECTED_ERROR:
1278                status = -EOVERFLOW;
1279                break;
1280        case COMP_RING_UNDERRUN:
1281        case COMP_RING_OVERRUN:
1282                /*
1283                 * When the Isoch ring is empty, the controller will generate
1284                 * a Ring Overrun Event for IN Isoch endpoint or Ring
1285                 * Underrun Event for OUT Isoch endpoint.
1286                 */
1287                goto cleanup;
1288        case COMP_MISSED_SERVICE_ERROR:
1289                /*
 1290                 * When a Missed Service Error is encountered, one or more
 1291                 * isoc TDs may have been missed by the controller.
 1292                 * Set the endpoint's skip flag; complete the missed TDs as
 1293                 * short transfers the next time the ring is processed.
1294                 */
1295                pep->skip = true;
1296                break;
1297        }
1298
1299        do {
1300                /*
1301                 * This TRB should be in the TD at the head of this ring's TD
1302                 * list.
1303                 */
1304                if (list_empty(&ep_ring->td_list)) {
1305                        /*
1306                         * Don't print warnings if it's due to a stopped
1307                         * endpoint generating an extra completion event, or
 1308                         * an event for the last TRB of a short TD we already
1309                         * got a short event for.
1310                         * The short TD is already removed from the TD list.
1311                         */
1312                        if (!(trb_comp_code == COMP_STOPPED ||
1313                              trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
1314                              ep_ring->last_td_was_short))
1315                                trace_cdnsp_trb_without_td(ep_ring,
1316                                        (struct cdnsp_generic_trb *)event);
1317
1318                        if (pep->skip) {
1319                                pep->skip = false;
1320                                trace_cdnsp_ep_list_empty_with_skip(pep, 0);
1321                        }
1322
1323                        goto cleanup;
1324                }
1325
1326                td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1327                                td_list);
1328
1329                /* Is this a TRB in the currently executing TD? */
1330                ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
1331                                         ep_ring->dequeue, td->last_trb,
1332                                         ep_trb_dma);
1333
1334                /*
 1335                 * Skip the Force Stopped Event. The event_trb (ep_trb_dma)
 1336                 * of the FSE is not in the current TD pointed to by
 1337                 * ep_ring->dequeue because the hardware dequeue pointer is
 1338                 * still at the previous TRB of the current TD. The previous
 1339                 * TRB may be a link TRB or the last TRB of the previous TD.
 1340                 * The command completion handler will take care of the rest.
1341                 */
1342                if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
1343                                trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
1344                        pep->skip = false;
1345                        goto cleanup;
1346                }
1347
1348                desc = td->preq->pep->endpoint.desc;
1349                if (!ep_seg) {
1350                        if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
1351                                /* Something is busted, give up! */
1352                                dev_err(pdev->dev,
1353                                        "ERROR Transfer event TRB DMA ptr not "
1354                                        "part of current TD ep_index %d "
1355                                        "comp_code %u\n", ep_index,
1356                                        trb_comp_code);
1357                                return -EINVAL;
1358                        }
1359
1360                        cdnsp_skip_isoc_td(pdev, td, event, pep, status);
1361                        goto cleanup;
1362                }
1363
1364                if (trb_comp_code == COMP_SHORT_PACKET)
1365                        ep_ring->last_td_was_short = true;
1366                else
1367                        ep_ring->last_td_was_short = false;
1368
1369                if (pep->skip) {
1370                        pep->skip = false;
1371                        cdnsp_skip_isoc_td(pdev, td, event, pep, status);
1372                        goto cleanup;
1373                }
1374
1375                ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
1376                                       / sizeof(*ep_trb)];
1377
1378                trace_cdnsp_handle_transfer(ep_ring,
1379                                            (struct cdnsp_generic_trb *)ep_trb);
1380
1381                if (cdnsp_trb_is_noop(ep_trb))
1382                        goto cleanup;
1383
1384                if (usb_endpoint_xfer_control(desc))
1385                        cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
1386                                              &status);
1387                else if (usb_endpoint_xfer_isoc(desc))
1388                        cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
1389                                              status);
1390                else
1391                        cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
1392                                                   &status);
1393cleanup:
1394                handling_skipped_tds = pep->skip;
1395
1396                /*
1397                 * Do not update event ring dequeue pointer if we're in a loop
1398                 * processing missed TDs.
1399                 */
1400                if (!handling_skipped_tds)
1401                        cdnsp_inc_deq(pdev, pdev->event_ring);
1402
1403        /*
1404         * If pep->skip is set, there are missed TDs on the endpoint
1405         * ring that need to be handled.
1406         * Process them as short transfers until we reach the TD pointed
1407         * to by the event.
1408         */
1409        } while (handling_skipped_tds);
1410        return 0;
1411
1412err_out:
1413        dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
1414                (unsigned long long)
1415                cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
1416                                      pdev->event_ring->dequeue),
1417                 lower_32_bits(le64_to_cpu(event->buffer)),
1418                 upper_32_bits(le64_to_cpu(event->buffer)),
1419                 le32_to_cpu(event->transfer_len),
1420                 le32_to_cpu(event->flags));
1421        return -EINVAL;
1422}
1423
1424/*
1425 * This function handles all events on the event ring.
1426 * Returns true for "possibly more events to process" (caller should call
1427 * again), otherwise false if done.
1428 */
1429static bool cdnsp_handle_event(struct cdnsp_device *pdev)
1430{
1431        unsigned int comp_code;
1432        union cdnsp_trb *event;
1433        bool update_ptrs = true;
1434        u32 cycle_bit;
1435        int ret = 0;
1436        u32 flags;
1437
1438        event = pdev->event_ring->dequeue;
1439        flags = le32_to_cpu(event->event_cmd.flags);
1440        cycle_bit = (flags & TRB_CYCLE);
1441
1442        /* Does the controller or driver own the TRB? */
1443        if (cycle_bit != pdev->event_ring->cycle_state)
1444                return false;
1445
1446        trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
1447
1448        /*
1449         * Barrier between reading the TRB_CYCLE (valid) flag above and any
1450         * reads of the event's flags/data below.
1451         */
1452        rmb();
1453
1454        switch (flags & TRB_TYPE_BITMASK) {
1455        case TRB_TYPE(TRB_COMPLETION):
1456                /*
1457                 * Commands can't be handled in interrupt context, so just
1458                 * increment the command ring dequeue pointer.
1459                 */
1460                cdnsp_inc_deq(pdev, pdev->cmd_ring);
1461                break;
1462        case TRB_TYPE(TRB_PORT_STATUS):
1463                cdnsp_handle_port_status(pdev, event);
1464                update_ptrs = false;
1465                break;
1466        case TRB_TYPE(TRB_TRANSFER):
1467                ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
1468                if (ret >= 0)
1469                        update_ptrs = false;
1470                break;
1471        case TRB_TYPE(TRB_SETUP):
1472                pdev->ep0_stage = CDNSP_SETUP_STAGE;
1473                pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
1474                pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
1475                pdev->setup = *((struct usb_ctrlrequest *)
1476                                &event->trans_event.buffer);
1477
1478                cdnsp_setup_analyze(pdev);
1479                break;
1480        case TRB_TYPE(TRB_ENDPOINT_NRDY):
1481                cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
1482                break;
1483        case TRB_TYPE(TRB_HC_EVENT): {
1484                comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
1485
1486                switch (comp_code) {
1487                case COMP_EVENT_RING_FULL_ERROR:
1488                        dev_err(pdev->dev, "Event Ring Full\n");
1489                        break;
1490                default:
1491                        dev_err(pdev->dev, "Controller error code 0x%02x\n",
1492                                comp_code);
1493                }
1494
1495                break;
1496        }
1497        case TRB_TYPE(TRB_MFINDEX_WRAP):
1498        case TRB_TYPE(TRB_DRB_OVERFLOW):
1499                break;
1500        default:
1501                dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
1502                         TRB_FIELD_TO_TYPE(flags));
1503        }
1504
1505        if (update_ptrs)
1506                /* Update SW event ring dequeue pointer. */
1507                cdnsp_inc_deq(pdev, pdev->event_ring);
1508
1509        /*
1510         * Caller will call us again to check if there are more items
1511         * on the event ring.
1512         */
1513        return true;
1514}
1515
1516irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
1517{
1518        struct cdnsp_device *pdev = (struct cdnsp_device *)data;
1519        union cdnsp_trb *event_ring_deq;
1520        unsigned long flags;
1521        int counter = 0;
1522
1523        spin_lock_irqsave(&pdev->lock, flags);
1524
1525        if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
1526                cdnsp_died(pdev);
1527                spin_unlock_irqrestore(&pdev->lock, flags);
1528                return IRQ_HANDLED;
1529        }
1530
1531        event_ring_deq = pdev->event_ring->dequeue;
1532
1533        while (cdnsp_handle_event(pdev)) {
1534                if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
1535                        cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
1536                        event_ring_deq = pdev->event_ring->dequeue;
1537                        counter = 0;
1538                }
1539        }
1540
1541        cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
1542
1543        spin_unlock_irqrestore(&pdev->lock, flags);
1544
1545        return IRQ_HANDLED;
1546}
1547
1548irqreturn_t cdnsp_irq_handler(int irq, void *priv)
1549{
1550        struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
1551        u32 irq_pending;
1552        u32 status;
1553
1554        status = readl(&pdev->op_regs->status);
1555
1556        if (status == ~(u32)0) {
1557                cdnsp_died(pdev);
1558                return IRQ_HANDLED;
1559        }
1560
1561        if (!(status & STS_EINT))
1562                return IRQ_NONE;
1563
1564        writel(status | STS_EINT, &pdev->op_regs->status);
1565        irq_pending = readl(&pdev->ir_set->irq_pending);
1566        irq_pending |= IMAN_IP;
1567        writel(irq_pending, &pdev->ir_set->irq_pending);
1568
1569        if (status & STS_FATAL) {
1570                cdnsp_died(pdev);
1571                return IRQ_HANDLED;
1572        }
1573
1574        return IRQ_WAKE_THREAD;
1575}
1576
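/*
 * Illustration only (not part of this file): the two handlers above form a
 * top/bottom-half pair. A probe routine would typically register them with
 * request_threaded_irq(); the IRQF_SHARED flag and "cdnsp" name below are
 * assumptions for this sketch, not taken from this driver:
 *
 *	ret = request_threaded_irq(irq, cdnsp_irq_handler,
 *				   cdnsp_thread_irq_handler,
 *				   IRQF_SHARED, "cdnsp", pdev);
 */
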
1577/*
1578 * Generic function for queuing a TRB on a ring.
1579 * The caller must have checked to make sure there's room on the ring.
1580 *
1581 * @more_trbs_coming:   Will you enqueue more TRBs before ringing the doorbell?
1582 */
1583static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
1584                            bool more_trbs_coming, u32 field1, u32 field2,
1585                            u32 field3, u32 field4)
1586{
1587        struct cdnsp_generic_trb *trb;
1588
1589        trb = &ring->enqueue->generic;
1590
1591        trb->field[0] = cpu_to_le32(field1);
1592        trb->field[1] = cpu_to_le32(field2);
1593        trb->field[2] = cpu_to_le32(field3);
1594        trb->field[3] = cpu_to_le32(field4);
1595
1596        trace_cdnsp_queue_trb(ring, trb);
1597        cdnsp_inc_enq(pdev, ring, more_trbs_coming);
1598}
1599
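/*
 * Illustration only: a minimal sketch of how a caller might queue a single
 * No Op TRB with the helper above, assuming TRB_TR_NOOP is defined as in the
 * XHCI-derived headers; the field values are an assumption, mirroring the
 * layout used by the queuing functions below:
 *
 *	cdnsp_queue_trb(pdev, ring, false, 0, 0, TRB_INTR_TARGET(0),
 *			TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
 */
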
1600/*
1601 * Does various checks on the endpoint ring, and makes it ready to
1602 * queue num_trbs.
1603 */
1604static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
1605                              struct cdnsp_ring *ep_ring,
1606                              u32 ep_state,
1607                              unsigned int num_trbs,
1608                              gfp_t mem_flags)
1609{
1610        unsigned int num_trbs_needed;
1611
1612        /* Make sure the endpoint has been added to the controller's schedule. */
1613        switch (ep_state) {
1614        case EP_STATE_STOPPED:
1615        case EP_STATE_RUNNING:
1616        case EP_STATE_HALTED:
1617                break;
1618        default:
1619                dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
1620                return -EINVAL;
1621        }
1622
1623        while (1) {
1624                if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
1625                        break;
1626
1627                trace_cdnsp_no_room_on_ring("try ring expansion");
1628
1629                num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
1630                if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
1631                                         mem_flags)) {
1632                        dev_err(pdev->dev, "Ring expansion failed\n");
1633                        return -ENOMEM;
1634                }
1635        }
1636
1637        while (cdnsp_trb_is_link(ep_ring->enqueue)) {
1638                ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
1639                /* The cycle bit must be set as the last operation. */
1640                wmb();
1641                ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
1642
1643                /* Toggle the cycle bit after the last ring segment. */
1644                if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
1645                        ep_ring->cycle_state ^= 1;
1646                ep_ring->enq_seg = ep_ring->enq_seg->next;
1647                ep_ring->enqueue = ep_ring->enq_seg->trbs;
1648        }
1649        return 0;
1650}
1651
1652static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
1653                                  struct cdnsp_request *preq,
1654                                  unsigned int num_trbs)
1655{
1656        struct cdnsp_ring *ep_ring;
1657        int ret;
1658
1659        ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
1660                                          preq->request.stream_id);
1661        if (!ep_ring)
1662                return -EINVAL;
1663
1664        ret = cdnsp_prepare_ring(pdev, ep_ring,
1665                                 GET_EP_CTX_STATE(preq->pep->out_ctx),
1666                                 num_trbs, GFP_ATOMIC);
1667        if (ret)
1668                return ret;
1669
1670        INIT_LIST_HEAD(&preq->td.td_list);
1671        preq->td.preq = preq;
1672
1673        /* Add this TD to the tail of the endpoint ring's TD list. */
1674        list_add_tail(&preq->td.td_list, &ep_ring->td_list);
1675        ep_ring->num_tds++;
1676        preq->pep->stream_info.td_count++;
1677
1678        preq->td.start_seg = ep_ring->enq_seg;
1679        preq->td.first_trb = ep_ring->enqueue;
1680
1681        return 0;
1682}
1683
1684static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
1685{
1686        unsigned int num_trbs;
1687
1688        num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
1689                                TRB_MAX_BUFF_SIZE);
1690        if (num_trbs == 0)
1691                num_trbs++;
1692
1693        return num_trbs;
1694}
1695
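/*
 * Worked example for the helper above (illustrative values): with
 * TRB_MAX_BUFF_SIZE = 64 KiB, addr = 0xFFF0 and len = 0x20 the buffer
 * straddles one 64 KiB boundary, so
 * DIV_ROUND_UP(0x20 + (0xFFF0 & 0xFFFF), 0x10000) = 2 TRBs are needed.
 * A zero-length request still consumes one TRB.
 */
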
1696static unsigned int count_trbs_needed(struct cdnsp_request *preq)
1697{
1698        return cdnsp_count_trbs(preq->request.dma, preq->request.length);
1699}
1700
1701static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
1702{
1703        unsigned int i, len, full_len, num_trbs = 0;
1704        struct scatterlist *sg;
1705
1706        full_len = preq->request.length;
1707
1708        for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
1709                len = sg_dma_len(sg);
1710                num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
1711                len = min(len, full_len);
1712                full_len -= len;
1713                if (full_len == 0)
1714                        break;
1715        }
1716
1717        return num_trbs;
1718}
1719
1720static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
1721{
1722        return cdnsp_count_trbs(preq->request.dma, preq->request.length);
1723}
1724
1725static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
1726{
1727        if (running_total != preq->request.length)
1728                dev_err(preq->pep->pdev->dev,
1729                        "%s - Miscalculated tx length, "
1730                        "queued %#x, asked for %#x (%d)\n",
1731                        preq->pep->name, running_total,
1732                        preq->request.length, preq->request.actual);
1733}
1734
1735/*
1736 * TD size is the number of max packet sized packets remaining in the TD
1737 * (*not* including this TRB).
1738 *
1739 * Total TD packet count = total_packet_count =
1740 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
1741 *
1742 * Packets transferred up to and including this TRB = packets_transferred =
1743 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
1744 *
1745 * TD size = total_packet_count - packets_transferred
1746 *
1747 * It must fit in bits 21:17, so it can't be bigger than 31.
1748 * This is taken care of by the TRB_TD_SIZE() macro.
1749 *
1750 * The last TRB in a TD must have the TD size set to zero.
1751 */
1752static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
1753                              int transferred,
1754                              int trb_buff_len,
1755                              unsigned int td_total_len,
1756                              struct cdnsp_request *preq,
1757                              bool more_trbs_coming)
1758{
1759        u32 maxp, total_packet_count;
1760
1761        /* Last TRB, zero-length packet, or TD that fits in one TRB: TD size 0. */
1762        if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
1763            trb_buff_len == td_total_len)
1764                return 0;
1765
1766        maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
1767        total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
1768
1769        /* Queuing functions don't count the current TRB into transferred. */
1770        return (total_packet_count - ((transferred + trb_buff_len) / maxp));
1771}
1772
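/*
 * Worked example for cdnsp_td_remainder() (illustrative values): with
 * maxp = 512 and td_total_len = 3000, total_packet_count =
 * DIV_ROUND_UP(3000, 512) = 6. For the first TRB (transferred = 0) with
 * trb_buff_len = 1024 and more TRBs coming, the remainder is
 * 6 - ((0 + 1024) / 512) = 4 max-packet-sized packets left in the TD.
 */
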
1773static int cdnsp_align_td(struct cdnsp_device *pdev,
1774                          struct cdnsp_request *preq, u32 enqd_len,
1775                          u32 *trb_buff_len, struct cdnsp_segment *seg)
1776{
1777        struct device *dev = pdev->dev;
1778        unsigned int unalign;
1779        unsigned int max_pkt;
1780        u32 new_buff_len;
1781
1782        max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
1783        unalign = (enqd_len + *trb_buff_len) % max_pkt;
1784
1785        /* We got lucky, the last normal TRB's data on the segment is packet aligned. */
1786        if (unalign == 0)
1787                return 0;
1788
1789        /* Can the last normal TRB be aligned by splitting it? */
1790        if (*trb_buff_len > unalign) {
1791                *trb_buff_len -= unalign;
1792                trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
1793                                                  enqd_len, 0, unalign);
1794                return 0;
1795        }
1796
1797        /*
1798         * We want enqd_len + trb_buff_len to sum up to a number which is
1799         * divisible by the endpoint's wMaxPacketSize. IOW:
1800         * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
1801         */
1802        new_buff_len = max_pkt - (enqd_len % max_pkt);
1803
1804        if (new_buff_len > (preq->request.length - enqd_len))
1805                new_buff_len = (preq->request.length - enqd_len);
1806
1807        /* Create a bounce buffer, at most max_pkt bytes, pointed to by the last TRB. */
1808        if (preq->direction) {
1809                sg_pcopy_to_buffer(preq->request.sg,
1810                                   preq->request.num_mapped_sgs,
1811                                   seg->bounce_buf, new_buff_len, enqd_len);
1812                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1813                                                 max_pkt, DMA_TO_DEVICE);
1814        } else {
1815                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1816                                                 max_pkt, DMA_FROM_DEVICE);
1817        }
1818
1819        if (dma_mapping_error(dev, seg->bounce_dma)) {
1820                /* Try without aligning. */
1821                dev_warn(pdev->dev,
1822                         "Failed mapping bounce buffer, not aligning\n");
1823                return 0;
1824        }
1825
1826        *trb_buff_len = new_buff_len;
1827        seg->bounce_len = new_buff_len;
1828        seg->bounce_offs = enqd_len;
1829
1830        trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
1831                               unalign);
1832
1833        /*
1834         * The bounce buffer was successfully aligned; seg->bounce_dma will be
1835         * used in the transfer TRB as the new transfer buffer address.
1836         */
1837        return 1;
1838}
1839
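/*
 * Worked example for cdnsp_align_td() (illustrative values): with
 * max_pkt = 512, enqd_len = 700 and *trb_buff_len = 100,
 * unalign = (700 + 100) % 512 = 288. Since 100 <= 288 the TRB cannot be
 * aligned by splitting, so a bounce buffer of
 * new_buff_len = 512 - (700 % 512) = 324 bytes is used instead (capped by
 * the bytes remaining in the request).
 */
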
1840int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
1841{
1842        unsigned int enqd_len, block_len, trb_buff_len, full_len;
1843        unsigned int start_cycle, num_sgs = 0;
1844        struct cdnsp_generic_trb *start_trb;
1845        u32 field, length_field, remainder;
1846        struct scatterlist *sg = NULL;
1847        bool more_trbs_coming = true;
1848        bool need_zero_pkt = false;
1849        bool zero_len_trb = false;
1850        struct cdnsp_ring *ring;
1851        bool first_trb = true;
1852        unsigned int num_trbs;
1853        struct cdnsp_ep *pep;
1854        u64 addr, send_addr;
1855        int sent_len, ret;
1856
1857        ring = cdnsp_request_to_transfer_ring(pdev, preq);
1858        if (!ring)
1859                return -EINVAL;
1860
1861        full_len = preq->request.length;
1862
1863        if (preq->request.num_sgs) {
1864                num_sgs = preq->request.num_sgs;
1865                sg = preq->request.sg;
1866                addr = (u64)sg_dma_address(sg);
1867                block_len = sg_dma_len(sg);
1868                num_trbs = count_sg_trbs_needed(preq);
1869        } else {
1870                num_trbs = count_trbs_needed(preq);
1871                addr = (u64)preq->request.dma;
1872                block_len = full_len;
1873        }
1874
1875        pep = preq->pep;
1876
1877        /* Deal with request.zero - need one more TD/TRB. */
1878        if (preq->request.zero && preq->request.length &&
1879            IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
1880                need_zero_pkt = true;
1881                num_trbs++;
1882        }
1883
1884        ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
1885        if (ret)
1886                return ret;
1887
1888        /*
1889         * Don't give the first TRB to the hardware (by toggling the cycle bit)
1890         * until we've finished creating all the other TRBs. The ring's cycle
1891         * state may change as we enqueue the other TRBs, so save it too.
1892         */
1893        start_trb = &ring->enqueue->generic;
1894        start_cycle = ring->cycle_state;
1895        send_addr = addr;
1896
1897        /* Queue the TRBs, even if they are zero-length */
1898        for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
1899             enqd_len += trb_buff_len) {
1900                field = TRB_TYPE(TRB_NORMAL);
1901
1902                /* TRB buffer should not cross 64KB boundaries */
1903                trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
1904                trb_buff_len = min(trb_buff_len, block_len);
1905                if (enqd_len + trb_buff_len > full_len)
1906                        trb_buff_len = full_len - enqd_len;
1907
1908                /* Don't change the cycle bit of the first TRB until later */
1909                if (first_trb) {
1910                        first_trb = false;
1911                        if (start_cycle == 0)
1912                                field |= TRB_CYCLE;
1913                } else {
1914                        field |= ring->cycle_state;
1915                }
1916
1917                /*
1918                 * Chain all the TRBs together; clear the chain bit in the last
1919                 * TRB to indicate it's the last TRB in the chain.
1920                 */
1921                if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
1922                        field |= TRB_CHAIN;
1923                        if (cdnsp_trb_is_link(ring->enqueue + 1)) {
1924                                if (cdnsp_align_td(pdev, preq, enqd_len,
1925                                                   &trb_buff_len,
1926                                                   ring->enq_seg)) {
1927                                        send_addr = ring->enq_seg->bounce_dma;
1928                                        /* Assuming TD won't span 2 segs */
1929                                        preq->td.bounce_seg = ring->enq_seg;
1930                                }
1931                        }
1932                }
1933
1934                if (enqd_len + trb_buff_len >= full_len) {
1935                        if (need_zero_pkt)
1936                                zero_len_trb = !zero_len_trb;
1937
1938                        field &= ~TRB_CHAIN;
1939                        field |= TRB_IOC;
1940                        more_trbs_coming = false;
1941                        preq->td.last_trb = ring->enqueue;
1942                }
1943
1944                /* Only set interrupt on short packet for OUT endpoints. */
1945                if (!preq->direction)
1946                        field |= TRB_ISP;
1947
1948                /* Set the TRB length, TD size, and interrupter fields. */
1949                remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
1950                                               full_len, preq,
1951                                               more_trbs_coming);
1952
1953                length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
1954                        TRB_INTR_TARGET(0);
1955
1956                cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
1957                                lower_32_bits(send_addr),
1958                                upper_32_bits(send_addr),
1959                                length_field,
1960                                field);
1961
1962                addr += trb_buff_len;
1963                sent_len = trb_buff_len;
1964                while (sg && sent_len >= block_len) {
1965                        /* New sg entry */
1966                        --num_sgs;
1967                        sent_len -= block_len;
1968                        if (num_sgs != 0) {
1969                                sg = sg_next(sg);
1970                                block_len = sg_dma_len(sg);
1971                                addr = (u64)sg_dma_address(sg);
1972                                addr += sent_len;
1973                        }
1974                }
1975                block_len -= sent_len;
1976                send_addr = addr;
1977        }
1978
1979        cdnsp_check_trb_math(preq, enqd_len);
1980        ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
1981                                       start_cycle, start_trb);
1982
1983        if (ret)
1984                preq->td.drbl = 1;
1985
1986        return 0;
1987}
1988
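/*
 * Worked example of the scatterlist advance in the loop above (illustrative
 * values): with two sg entries of 0x100 and 0x200 bytes and a TRB that
 * consumed exactly block_len = 0x100 bytes, sent_len equals block_len, so
 * the walk moves to the next entry: addr becomes
 * sg_dma_address(sg_next(sg)) + 0 and block_len becomes 0x200 for the
 * following TRB.
 */
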
1989int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
1990{
1991        u32 field, length_field, remainder;
1992        struct cdnsp_ep *pep = preq->pep;
1993        struct cdnsp_ring *ep_ring;
1994        int num_trbs;
1995        int ret;
1996
1997        ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
1998        if (!ep_ring)
1999                return -EINVAL;
2000
2001        /* 1 TRB for data, 1 for status */
2002        num_trbs = (pdev->three_stage_setup) ? 2 : 1;
2003
2004        ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
2005        if (ret)
2006                return ret;
2007
2008        /* If there's data, queue data TRBs */
2009        if (pdev->ep0_expect_in)
2010                field = TRB_TYPE(TRB_DATA) | TRB_IOC;
2011        else
2012                field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
2013
2014        if (preq->request.length > 0) {
2015                remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
2016                                               preq->request.length, preq, 1);
2017
2018                length_field = TRB_LEN(preq->request.length) |
2019                                TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
2020
2021                if (pdev->ep0_expect_in)
2022                        field |= TRB_DIR_IN;
2023
2024                cdnsp_queue_trb(pdev, ep_ring, true,
2025                                lower_32_bits(preq->request.dma),
2026                                upper_32_bits(preq->request.dma), length_field,
2027                                field | ep_ring->cycle_state |
2028                                TRB_SETUPID(pdev->setup_id) |
2029                                pdev->setup_speed);
2030
2031                pdev->ep0_stage = CDNSP_DATA_STAGE;
2032        }
2033
2034        /* Save the pointer to the last TRB in the TD (the status stage TRB). */
2035        preq->td.last_trb = ep_ring->enqueue;
2036
2037        /* Queue status TRB. */
2038        if (preq->request.length == 0)
2039                field = ep_ring->cycle_state;
2040        else
2041                field = (ep_ring->cycle_state ^ 1);
2042
2043        if (preq->request.length > 0 && pdev->ep0_expect_in)
2044                field |= TRB_DIR_IN;
2045
2046        if (pep->ep_state & EP0_HALTED_STATUS) {
2047                pep->ep_state &= ~EP0_HALTED_STATUS;
2048                field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
2049        } else {
2050                field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
2051        }
2052
2053        cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
2054                        field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
2055                        TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
2056
2057        cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
2058
2059        return 0;
2060}
2061
2062int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
2063{
2064        u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
2065        int ret = 0;
2066
2067        if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) {
2068                trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
2069                goto ep_stopped;
2070        }
2071
2072        cdnsp_queue_stop_endpoint(pdev, pep->idx);
2073        cdnsp_ring_cmd_db(pdev);
2074        ret = cdnsp_wait_for_cmd_compl(pdev);
2075
2076        trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
2077
2078ep_stopped:
2079        pep->ep_state |= EP_STOPPED;
2080        return ret;
2081}
2082
2083int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
2084{
2085        int ret;
2086
2087        cdnsp_queue_flush_endpoint(pdev, pep->idx);
2088        cdnsp_ring_cmd_db(pdev);
2089        ret = cdnsp_wait_for_cmd_compl(pdev);
2090
2091        trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
2092
2093        return ret;
2094}
2095
2096/*
2097 * The transfer burst count field of the isochronous TRB defines the number of
2098 * bursts that are required to move all packets in this TD. Only SuperSpeed
2099 * devices can burst up to (bMaxBurst + 1) packets per burst in a service
2100 * interval.
2100 * This field is zero based, meaning a value of zero in the field means one
2101 * burst. Basically, for everything but SuperSpeed devices, this field will be
2102 * zero.
2103 */
2104static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
2105                                          struct cdnsp_request *preq,
2106                                          unsigned int total_packet_count)
2107{
2108        unsigned int max_burst;
2109
2110        if (pdev->gadget.speed < USB_SPEED_SUPER)
2111                return 0;
2112
2113        max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2114        return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
2115}
2116
2117/*
2118 * Returns the number of packets in the last "burst" of packets. This field is
2119 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
2120 * the last burst packet count is equal to the total number of packets in the
2121 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
2122 * must contain (bMaxBurst + 1) number of packets, but the last burst can
2123 * contain 1 to (bMaxBurst + 1) packets.
2124 */
2125static unsigned int
2126        cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
2127                                          struct cdnsp_request *preq,
2128                                          unsigned int total_packet_count)
2129{
2130        unsigned int max_burst;
2131        unsigned int residue;
2132
2133        if (pdev->gadget.speed >= USB_SPEED_SUPER) {
2134                /* bMaxBurst is zero based: 0 means 1 packet per burst. */
2135                max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2136                residue = total_packet_count % (max_burst + 1);
2137
2138                /*
2139                 * If residue is zero, the last burst contains (max_burst + 1)
2140                 * number of packets, but the TLBPC field is zero-based.
2141                 */
2142                if (residue == 0)
2143                        return max_burst;
2144
2145                return residue - 1;
2146        }
2147        if (total_packet_count == 0)
2148                return 0;
2149
2150        return total_packet_count - 1;
2151}
2152
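/*
 * Worked example for the two helpers above (illustrative values): for a
 * SuperSpeed endpoint with bMaxBurst = 3 and total_packet_count = 7,
 * cdnsp_get_burst_count() returns DIV_ROUND_UP(7, 4) - 1 = 1 (two bursts,
 * zero based) and cdnsp_get_last_burst_packet_count() returns
 * (7 % 4) - 1 = 2, i.e. the last burst carries three packets.
 */
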
2153/* Queue an isoc transfer. */
2154static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
2155                               struct cdnsp_request *preq)
2156{
2157        int trb_buff_len, td_len, td_remain_len, ret;
2158        unsigned int burst_count, last_burst_pkt;
2159        unsigned int total_pkt_count, max_pkt;
2160        struct cdnsp_generic_trb *start_trb;
2161        bool more_trbs_coming = true;
2162        struct cdnsp_ring *ep_ring;
2163        int running_total = 0;
2164        u32 field, length_field;
2165        int start_cycle;
2166        int trbs_per_td;
2167        u64 addr;
2168        int i;
2169
2170        ep_ring = preq->pep->ring;
2171        start_trb = &ep_ring->enqueue->generic;
2172        start_cycle = ep_ring->cycle_state;
2173        td_len = preq->request.length;
2174        addr = (u64)preq->request.dma;
2175        td_remain_len = td_len;
2176
2177        max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
2178        total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
2179
2180        /* A zero-length transfer still involves at least one packet. */
2181        if (total_pkt_count == 0)
2182                total_pkt_count++;
2183
2184        burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
2185        last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
2186                                                           total_pkt_count);
2187        trbs_per_td = count_isoc_trbs_needed(preq);
2188
2189        ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
2190        if (ret)
2191                goto cleanup;
2192
2193        /*
2194         * Set isoc-specific data for the first TRB in a TD.
2195         * Prevent HW from picking up the TRBs by keeping the cycle state
2196         * inverted in the first TD's isoc TRB.
2197         */
2198        field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
2199                TRB_SIA | TRB_TBC(burst_count);
2200
2201        if (!start_cycle)
2202                field |= TRB_CYCLE;
2203
2204        /* Fill the rest of the TRB fields, and remaining normal TRBs. */
2205        for (i = 0; i < trbs_per_td; i++) {
2206                u32 remainder;
2207
2208                /* Calculate TRB length. */
2209                trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
2210                if (trb_buff_len > td_remain_len)
2211                        trb_buff_len = td_remain_len;
2212
2213                /* Set the TRB length, TD size, & interrupter fields. */
2214                remainder = cdnsp_td_remainder(pdev, running_total,
2215                                               trb_buff_len, td_len, preq,
2216                                               more_trbs_coming);
2217
2218                length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
2219
2220                /* Only the first TRB is an isoc TRB; the rest are Normal TRBs. */
2221                if (i) {
2222                        field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
2223                        length_field |= TRB_TD_SIZE(remainder);
2224                } else {
2225                        length_field |= TRB_TD_SIZE_TBC(burst_count);
2226                }
2227
2228                /* Only set interrupt on short packet for OUT EPs. */
2229                if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
2230                        field |= TRB_ISP;
2231
2232                /* Set the chain bit for all except the last TRB. */
2233                if (i < trbs_per_td - 1) {
2234                        more_trbs_coming = true;
2235                        field |= TRB_CHAIN;
2236                } else {
2237                        more_trbs_coming = false;
2238                        preq->td.last_trb = ep_ring->enqueue;
2239                        field |= TRB_IOC;
2240                }
2241
2242                cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
2243                                lower_32_bits(addr), upper_32_bits(addr),
2244                                length_field, field);
2245
2246                running_total += trb_buff_len;
2247                addr += trb_buff_len;
2248                td_remain_len -= trb_buff_len;
2249        }
2250
2251        /* Check TD length */
2252        if (running_total != td_len) {
2253                dev_err(pdev->dev, "ISOC TD length mismatch\n");
2254                ret = -EINVAL;
2255                goto cleanup;
2256        }
2257
2258        cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
2259                                 start_cycle, start_trb);
2260
2261        return 0;
2262
2263cleanup:
2264        /* Clean up a partially enqueued isoc transfer. */
2265        list_del_init(&preq->td.td_list);
2266        ep_ring->num_tds--;
2267
2268        /*
2269         * Use the first TD as a temporary variable to turn the TDs we've
2270         * queued into No-ops with a software-owned cycle bit.
2271         * That way the hardware won't accidentally start executing bogus TDs
2272         * when we partially overwrite them.
2273         * td->first_trb and td->start_seg are already set.
2274         */
2275        preq->td.last_trb = ep_ring->enqueue;
2276        /* Every TRB except the first & last will have its cycle bit flipped. */
2277        cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
2278
2279        /* Reset the ring enqueue back to the first TRB and its cycle bit. */
2280        ep_ring->enqueue = preq->td.first_trb;
2281        ep_ring->enq_seg = preq->td.start_seg;
2282        ep_ring->cycle_state = start_cycle;
2283        return ret;
2284}
2285
2286int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
2287                                struct cdnsp_request *preq)
2288{
2289        struct cdnsp_ring *ep_ring;
2290        u32 ep_state;
2291        int num_trbs;
2292        int ret;
2293
2294        ep_ring = preq->pep->ring;
2295        ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
2296        num_trbs = count_isoc_trbs_needed(preq);
2297
2298        /*
2299         * Check the ring to guarantee there is enough room for the whole
2300         * request. Do not insert any TDs of the USB request into the ring
2301         * if the check fails.
2302         */
2303        ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
2304        if (ret)
2305                return ret;
2306
2307        return cdnsp_queue_isoc_tx(pdev, preq);
2308}
2309
2310/****           Command Ring Operations         ****/
2311/*
2312 * Generic function for queuing a command TRB on the command ring.
2313 * The driver queues only one command to the ring at a time.
2314 */
2315static void cdnsp_queue_command(struct cdnsp_device *pdev,
2316                                u32 field1,
2317                                u32 field2,
2318                                u32 field3,
2319                                u32 field4)
2320{
2321        cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
2322                           GFP_ATOMIC);
2323
2324        pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
2325
2326        cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
2327                        field3, field4 | pdev->cmd_ring->cycle_state);
2328}
2329
2330/* Queue a slot enable or disable request on the command ring */
2331void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
2332{
2333        cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
2334                            SLOT_ID_FOR_TRB(pdev->slot_id));
2335}
2336
2337/* Queue an address device command TRB */
2338void cdnsp_queue_address_device(struct cdnsp_device *pdev,
2339                                dma_addr_t in_ctx_ptr,
2340                                enum cdnsp_setup_dev setup)
2341{
2342        cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2343                            upper_32_bits(in_ctx_ptr), 0,
2344                            TRB_TYPE(TRB_ADDR_DEV) |
2345                            SLOT_ID_FOR_TRB(pdev->slot_id) |
2346                            (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
2347}
2348
2349/* Queue a reset device command TRB */
2350void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
2351{
2352        cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
2353                            SLOT_ID_FOR_TRB(pdev->slot_id));
2354}
2355
2356/* Queue a configure endpoint command TRB */
2357void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
2358                                    dma_addr_t in_ctx_ptr)
2359{
2360        cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2361                            upper_32_bits(in_ctx_ptr), 0,
2362                            TRB_TYPE(TRB_CONFIG_EP) |
2363                            SLOT_ID_FOR_TRB(pdev->slot_id));
2364}
2365
2366/*
2367 * Queue a "Stop Endpoint Command" on the command ring to stop activity on
2368 * an endpoint, e.g. one that is about to be suspended.
2369 */
2370void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2371{
2372        cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
2373                            EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
2374}
2375
2376/* Set Transfer Ring Dequeue Pointer command. */
2377void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
2378                                   struct cdnsp_ep *pep,
2379                                   struct cdnsp_dequeue_state *deq_state)
2380{
2381        u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
2382        u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
2383        u32 type = TRB_TYPE(TRB_SET_DEQ);
2384        u32 trb_sct = 0;
2385        dma_addr_t addr;
2386
2387        addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
2388                                     deq_state->new_deq_ptr);
2389
2390        if (deq_state->stream_id)
2391                trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
2392
2393        cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
2394                            deq_state->new_cycle_state, upper_32_bits(addr),
2395                            trb_stream_id, trb_slot_id |
2396                            EP_ID_FOR_TRB(pep->idx) | type);
2397}
2398
2399void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
2400{
2401        return cdnsp_queue_command(pdev, 0, 0, 0,
2402                                   SLOT_ID_FOR_TRB(pdev->slot_id) |
2403                                   EP_ID_FOR_TRB(ep_index) |
2404                                   TRB_TYPE(TRB_RESET_EP));
2405}
2406
2407/*
2408 * Queue a halt endpoint request on the command ring.
2409 */
2410void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2411{
2412        cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
2413                            SLOT_ID_FOR_TRB(pdev->slot_id) |
2414                            EP_ID_FOR_TRB(ep_index));
2415}
2416
2417/*
2418 * Queue a flush endpoint request on the command ring.
2419 */
2420void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
2421                                unsigned int ep_index)
2422{
2423        cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
2424                            SLOT_ID_FOR_TRB(pdev->slot_id) |
2425                            EP_ID_FOR_TRB(ep_index));
2426}
2427
2428void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
2429{
2430        u32 lo, mid;
2431
2432        lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
2433             TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
2434        mid = TRB_FH_TR_PACKET_DEV_NOT |
2435              TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
2436              TRB_FH_TO_INTERFACE(intf_num);
2437
2438        cdnsp_queue_command(pdev, lo, mid, 0,
2439                            TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
2440}
2441