/* linux/drivers/usb/host/xhci-dbgcap.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
   9#include <linux/dma-mapping.h>
  10#include <linux/slab.h>
  11#include <linux/nls.h>
  12
  13#include "xhci.h"
  14#include "xhci-trace.h"
  15#include "xhci-dbgcap.h"
  16
  17static inline void *
  18dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
  19                       dma_addr_t *dma_handle, gfp_t flags)
  20{
  21        void            *vaddr;
  22
  23        vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
  24                                   size, dma_handle, flags);
  25        memset(vaddr, 0, size);
  26        return vaddr;
  27}
  28
  29static inline void
  30dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
  31                      void *cpu_addr, dma_addr_t dma_handle)
  32{
  33        if (cpu_addr)
  34                dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
  35                                  size, cpu_addr, dma_handle);
  36}
  37
  38static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
  39{
  40        struct usb_string_descriptor    *s_desc;
  41        u32                             string_length;
  42
  43        /* Serial string: */
  44        s_desc = (struct usb_string_descriptor *)strings->serial;
  45        utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
  46                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
  47                        DBC_MAX_STRING_LENGTH);
  48
  49        s_desc->bLength         = (strlen(DBC_STRING_SERIAL) + 1) * 2;
  50        s_desc->bDescriptorType = USB_DT_STRING;
  51        string_length           = s_desc->bLength;
  52        string_length           <<= 8;
  53
  54        /* Product string: */
  55        s_desc = (struct usb_string_descriptor *)strings->product;
  56        utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
  57                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
  58                        DBC_MAX_STRING_LENGTH);
  59
  60        s_desc->bLength         = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
  61        s_desc->bDescriptorType = USB_DT_STRING;
  62        string_length           += s_desc->bLength;
  63        string_length           <<= 8;
  64
  65        /* Manufacture string: */
  66        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
  67        utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
  68                        strlen(DBC_STRING_MANUFACTURER),
  69                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
  70                        DBC_MAX_STRING_LENGTH);
  71
  72        s_desc->bLength         = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
  73        s_desc->bDescriptorType = USB_DT_STRING;
  74        string_length           += s_desc->bLength;
  75        string_length           <<= 8;
  76
  77        /* String0: */
  78        strings->string0[0]     = 4;
  79        strings->string0[1]     = USB_DT_STRING;
  80        strings->string0[2]     = 0x09;
  81        strings->string0[3]     = 0x04;
  82        string_length           += 4;
  83
  84        return string_length;
  85}
  86
  87static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
  88{
  89        struct xhci_dbc         *dbc;
  90        struct dbc_info_context *info;
  91        struct xhci_ep_ctx      *ep_ctx;
  92        u32                     dev_info;
  93        dma_addr_t              deq, dma;
  94        unsigned int            max_burst;
  95
  96        dbc = xhci->dbc;
  97        if (!dbc)
  98                return;
  99
 100        /* Populate info Context: */
 101        info                    = (struct dbc_info_context *)dbc->ctx->bytes;
 102        dma                     = dbc->string_dma;
 103        info->string0           = cpu_to_le64(dma);
 104        info->manufacturer      = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
 105        info->product           = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
 106        info->serial            = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
 107        info->length            = cpu_to_le32(string_length);
 108
 109        /* Populate bulk out endpoint context: */
 110        ep_ctx                  = dbc_bulkout_ctx(dbc);
 111        max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
 112        deq                     = dbc_bulkout_enq(dbc);
 113        ep_ctx->ep_info         = 0;
 114        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
 115        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);
 116
 117        /* Populate bulk in endpoint context: */
 118        ep_ctx                  = dbc_bulkin_ctx(dbc);
 119        deq                     = dbc_bulkin_enq(dbc);
 120        ep_ctx->ep_info         = 0;
 121        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
 122        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);
 123
 124        /* Set DbC context and info registers: */
 125        xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);
 126
 127        dev_info = cpu_to_le32((DBC_VENDOR_ID << 16) | DBC_PROTOCOL);
 128        writel(dev_info, &dbc->regs->devinfo1);
 129
 130        dev_info = cpu_to_le32((DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID);
 131        writel(dev_info, &dbc->regs->devinfo2);
 132}
 133
/*
 * Complete a transfer request and hand it back to its submitter.
 *
 * Unlinks @req from the endpoint's pending list, unmaps its DMA buffer
 * and invokes the request's completion callback.  Must be called with
 * dbc->lock held; the lock is dropped around the callback (documented
 * by the sparse annotations below) so the callback may resubmit.
 */
static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct dbc_ep           *dep = req->dep;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_hcd         *xhci = dbc->xhci;
        struct device           *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        /* Keep the first recorded status; don't overwrite an earlier error. */
        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev,
                         req->dma,
                         req->length,
                         dbc_ep_dma_direction(dep));

        /* Give back the transfer request: */
        spin_unlock(&dbc->lock);
        req->complete(xhci, req);
        spin_lock(&dbc->lock);
}
 162
 163static void xhci_dbc_flush_single_request(struct dbc_request *req)
 164{
 165        union xhci_trb  *trb = req->trb;
 166
 167        trb->generic.field[0]   = 0;
 168        trb->generic.field[1]   = 0;
 169        trb->generic.field[2]   = 0;
 170        trb->generic.field[3]   &= cpu_to_le32(TRB_CYCLE);
 171        trb->generic.field[3]   |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
 172
 173        xhci_dbc_giveback(req, -ESHUTDOWN);
 174}
 175
 176static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
 177{
 178        struct dbc_request      *req, *tmp;
 179
 180        list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
 181                xhci_dbc_flush_single_request(req);
 182}
 183
 184static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
 185{
 186        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
 187        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
 188}
 189
 190struct dbc_request *
 191dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
 192{
 193        struct dbc_request      *req;
 194
 195        req = kzalloc(sizeof(*req), gfp_flags);
 196        if (!req)
 197                return NULL;
 198
 199        req->dep = dep;
 200        INIT_LIST_HEAD(&req->list_pending);
 201        INIT_LIST_HEAD(&req->list_pool);
 202        req->direction = dep->direction;
 203
 204        trace_xhci_dbc_alloc_request(req);
 205
 206        return req;
 207}
 208
 209void
 210dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
 211{
 212        trace_xhci_dbc_free_request(req);
 213
 214        kfree(req);
 215}
 216
/*
 * Write one TRB at the ring's enqueue position and advance the enqueue
 * pointer.  The caller must have set the cycle bit in @field4 correctly.
 * When the next slot holds the link TRB, its cycle bit is toggled to hand
 * it to the controller, the enqueue pointer wraps to the start of the
 * segment, and the ring's producer cycle state flips.
 */
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
                   u32 field2, u32 field3, u32 field4)
{
        union xhci_trb          *trb, *next;

        trb = ring->enqueue;
        trb->generic.field[0]   = cpu_to_le32(field1);
        trb->generic.field[1]   = cpu_to_le32(field2);
        trb->generic.field[2]   = cpu_to_le32(field3);
        trb->generic.field[3]   = cpu_to_le32(field4);

        trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
                /* Give the link TRB to the controller and wrap around. */
                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                ring->enqueue = ring->enq_seg->trbs;
                ring->cycle_state ^= 1;
        }
}
 239
 240static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
 241                                  struct dbc_request *req)
 242{
 243        u64                     addr;
 244        union xhci_trb          *trb;
 245        unsigned int            num_trbs;
 246        struct xhci_dbc         *dbc = dep->dbc;
 247        struct xhci_ring        *ring = dep->ring;
 248        u32                     length, control, cycle;
 249
 250        num_trbs = count_trbs(req->dma, req->length);
 251        WARN_ON(num_trbs != 1);
 252        if (ring->num_trbs_free < num_trbs)
 253                return -EBUSY;
 254
 255        addr    = req->dma;
 256        trb     = ring->enqueue;
 257        cycle   = ring->cycle_state;
 258        length  = TRB_LEN(req->length);
 259        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
 260
 261        if (cycle)
 262                control &= cpu_to_le32(~TRB_CYCLE);
 263        else
 264                control |= cpu_to_le32(TRB_CYCLE);
 265
 266        req->trb = ring->enqueue;
 267        req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
 268        xhci_dbc_queue_trb(ring,
 269                           lower_32_bits(addr),
 270                           upper_32_bits(addr),
 271                           length, control);
 272
 273        /*
 274         * Add a barrier between writes of trb fields and flipping
 275         * the cycle bit:
 276         */
 277        wmb();
 278
 279        if (cycle)
 280                trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
 281        else
 282                trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);
 283
 284        writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
 285
 286        return 0;
 287}
 288
/*
 * Map @req's buffer for DMA and queue it on @dep's transfer ring.
 *
 * Caller must hold dbc->lock (see dbc_ep_queue()).  On queueing failure
 * the buffer is unmapped again, so the request never leaks a mapping.
 * Returns 0 on success, -EINVAL for an empty request, -EFAULT on
 * mapping or queueing failure.
 */
static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
        int                     ret;
        struct device           *dev;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_hcd         *xhci = dbc->xhci;

        dev = xhci_to_hcd(xhci)->self.sysdev;

        if (!req->length || !req->buf)
                return -EINVAL;

        req->actual             = 0;
        req->status             = -EINPROGRESS;

        req->dma = dma_map_single(dev,
                                  req->buf,
                                  req->length,
                                  dbc_ep_dma_direction(dep));
        if (dma_mapping_error(dev, req->dma)) {
                xhci_err(xhci, "failed to map buffer\n");
                return -EFAULT;
        }

        ret = xhci_dbc_queue_bulk_tx(dep, req);
        if (ret) {
                xhci_err(xhci, "failed to queue trbs\n");
                /* Undo the mapping taken above before failing. */
                dma_unmap_single(dev,
                                 req->dma,
                                 req->length,
                                 dbc_ep_dma_direction(dep));
                return -EFAULT;
        }

        /* Completion (xhci_dbc_giveback) will unlink the request again. */
        list_add_tail(&req->list_pending, &dep->list_pending);

        return 0;
}
 328
 329int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
 330                 gfp_t gfp_flags)
 331{
 332        unsigned long           flags;
 333        struct xhci_dbc         *dbc = dep->dbc;
 334        int                     ret = -ESHUTDOWN;
 335
 336        spin_lock_irqsave(&dbc->lock, flags);
 337        if (dbc->state == DS_CONFIGURED)
 338                ret = dbc_ep_do_queue(dep, req);
 339        spin_unlock_irqrestore(&dbc->lock, flags);
 340
 341        mod_delayed_work(system_wq, &dbc->event_work, 0);
 342
 343        trace_xhci_dbc_queue_request(req);
 344
 345        return ret;
 346}
 347
 348static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
 349{
 350        struct dbc_ep           *dep;
 351        struct xhci_dbc         *dbc = xhci->dbc;
 352
 353        dep                     = &dbc->eps[direction];
 354        dep->dbc                = dbc;
 355        dep->direction          = direction;
 356        dep->ring               = direction ? dbc->ring_in : dbc->ring_out;
 357
 358        INIT_LIST_HEAD(&dep->list_pending);
 359}
 360
 361static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
 362{
 363        xhci_dbc_do_eps_init(xhci, BULK_OUT);
 364        xhci_dbc_do_eps_init(xhci, BULK_IN);
 365}
 366
 367static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
 368{
 369        struct xhci_dbc         *dbc = xhci->dbc;
 370
 371        memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
 372}
 373
/*
 * Allocate and initialize all DbC data structures: event ring, the two
 * bulk transfer rings, the ERST, the device context, and the string
 * table.  Programs the ERST/ERDP registers and the context registers,
 * then leaves the DbC in DS_INITIALIZED state.
 *
 * On any allocation failure the goto chain below unwinds everything
 * acquired so far (each label frees the resources obtained before the
 * failing step) and -ENOMEM is returned.
 */
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        int                     ret;
        dma_addr_t              deq;
        u32                     string_length;
        struct xhci_dbc         *dbc = xhci->dbc;

        /* Allocate various rings for events and transfers: */
        dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
        if (!dbc->ring_evt)
                goto evt_fail;

        dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_in)
                goto in_fail;

        dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_out)
                goto out_fail;

        /* Allocate and populate ERST: */
        ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
        if (ret)
                goto erst_fail;

        /* Allocate context data structure: */
        dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dbc->ctx)
                goto ctx_fail;

        /* Allocate the string table: */
        dbc->string_size = sizeof(struct dbc_str_descs);
        dbc->string = dbc_dma_alloc_coherent(xhci,
                                             dbc->string_size,
                                             &dbc->string_dma,
                                             flags);
        if (!dbc->string)
                goto string_fail;

        /* Setup ERST register: */
        writel(dbc->erst.erst_size, &dbc->regs->ersts);
        xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
        deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                   dbc->ring_evt->dequeue);
        xhci_write_64(xhci, deq, &dbc->regs->erdp);

        /* Setup strings and contexts: */
        string_length = xhci_dbc_populate_strings(dbc->string);
        xhci_dbc_init_contexts(xhci, string_length);

        xhci_dbc_eps_init(xhci);
        dbc->state = DS_INITIALIZED;

        return 0;

string_fail:
        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;
ctx_fail:
        xhci_free_erst(xhci, &dbc->erst);
erst_fail:
        xhci_ring_free(xhci, dbc->ring_out);
        dbc->ring_out = NULL;
out_fail:
        xhci_ring_free(xhci, dbc->ring_in);
        dbc->ring_in = NULL;
in_fail:
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_evt = NULL;
evt_fail:
        return -ENOMEM;
}
 446
/*
 * Free everything xhci_dbc_mem_init() allocated: endpoints, string
 * table, context, ERST and all three rings.  Safe to call when some
 * members are already NULL (the free helpers tolerate NULL and the
 * pointers are cleared afterwards).
 */
static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return;

        xhci_dbc_eps_exit(xhci);

        if (dbc->string) {
                dbc_dma_free_coherent(xhci,
                                      dbc->string_size,
                                      dbc->string, dbc->string_dma);
                dbc->string = NULL;
        }

        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;

        xhci_free_erst(xhci, &dbc->erst);
        xhci_ring_free(xhci, dbc->ring_out);
        xhci_ring_free(xhci, dbc->ring_in);
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_in = NULL;
        dbc->ring_out = NULL;
        dbc->ring_evt = NULL;
}
 474
/*
 * Take the DbC from DS_DISABLED to DS_ENABLED.
 *
 * Called with dbc->lock held (hence the GFP_ATOMIC allocation below).
 * First disables the controller and waits for the enable bit to read
 * back as zero, then sets up memory, then enables DbC and the port and
 * waits for the enable bit to latch.  Returns 0 on success or a
 * negative error from the handshake / memory setup.
 */
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        u32                     ctrl;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (dbc->state != DS_DISABLED)
                return -EINVAL;

        /* Clear the control register and wait for DBC_ENABLE to drop. */
        writel(0, &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             0, 1000);
        if (ret)
                return ret;

        /* GFP_ATOMIC: we're under dbc->lock here. */
        ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
        if (ret)
                return ret;

        ctrl = readl(&dbc->regs->control);
        writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
               &dbc->regs->control);
        /* Wait for the controller to report itself enabled. */
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             DBC_CTRL_DBC_ENABLE, 1000);
        if (ret)
                return ret;

        dbc->state = DS_ENABLED;

        return 0;
}
 508
 509static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
 510{
 511        struct xhci_dbc         *dbc = xhci->dbc;
 512
 513        if (dbc->state == DS_DISABLED)
 514                return -1;
 515
 516        writel(0, &dbc->regs->control);
 517        dbc->state = DS_DISABLED;
 518
 519        return 0;
 520}
 521
/*
 * Enable the DbC and schedule the event-polling work.
 *
 * Holds a runtime-PM reference on the controller for as long as DbC is
 * active; the matching put is in xhci_dbc_stop(), or below on failure.
 */
static int xhci_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        unsigned long           flags;
        struct xhci_dbc         *dbc = xhci->dbc;

        WARN_ON(!dbc);

        pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_start(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (ret) {
                /* Enabling failed: drop the PM reference taken above. */
                pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
                return ret;
        }

        /* Start polling for DbC events. */
        return mod_delayed_work(system_wq, &dbc->event_work, 1);
}
 543
/*
 * Disable the DbC: stop the event polling work, unregister the tty
 * device if one was created, and shut the controller down.  DbC memory
 * is freed and the runtime-PM reference dropped only if the hardware
 * was actually stopped (xhci_do_dbc_stop() returned 0).
 */
static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
        int ret;
        unsigned long           flags;
        struct xhci_dbc         *dbc = xhci->dbc;
        struct dbc_port         *port = &dbc->port;

        WARN_ON(!dbc);

        /* Ensure the event work is not running while we tear down. */
        cancel_delayed_work_sync(&dbc->event_work);

        if (port->registered)
                xhci_dbc_tty_unregister_device(xhci);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (!ret) {
                xhci_dbc_mem_cleanup(xhci);
                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
        }
}
 567
/*
 * Handle a port status change event: log which change bits are set and
 * acknowledge them by writing portsc back (the change bits are
 * write-1-to-clear).  The reset change bit is masked out of the
 * write-back because it is cleared elsewhere.
 */
static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
        u32                     portsc;
        struct xhci_dbc         *dbc = xhci->dbc;

        portsc = readl(&dbc->regs->portsc);
        if (portsc & DBC_PORTSC_CONN_CHANGE)
                xhci_info(xhci, "DbC port connect change\n");

        if (portsc & DBC_PORTSC_RESET_CHANGE)
                xhci_info(xhci, "DbC port reset change\n");

        if (portsc & DBC_PORTSC_LINK_CHANGE)
                xhci_info(xhci, "DbC port link status change\n");

        if (portsc & DBC_PORTSC_CONFIG_CHANGE)
                xhci_info(xhci, "DbC config error change\n");

        /* Port reset change bit will be cleared in other place: */
        writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
 590
 591static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
 592{
 593        struct dbc_ep           *dep;
 594        struct xhci_ring        *ring;
 595        int                     ep_id;
 596        int                     status;
 597        u32                     comp_code;
 598        size_t                  remain_length;
 599        struct dbc_request      *req = NULL, *r;
 600
 601        comp_code       = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
 602        remain_length   = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
 603        ep_id           = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
 604        dep             = (ep_id == EPID_OUT) ?
 605                                get_out_ep(xhci) : get_in_ep(xhci);
 606        ring            = dep->ring;
 607
 608        switch (comp_code) {
 609        case COMP_SUCCESS:
 610                remain_length = 0;
 611        /* FALLTHROUGH */
 612        case COMP_SHORT_PACKET:
 613                status = 0;
 614                break;
 615        case COMP_TRB_ERROR:
 616        case COMP_BABBLE_DETECTED_ERROR:
 617        case COMP_USB_TRANSACTION_ERROR:
 618        case COMP_STALL_ERROR:
 619                xhci_warn(xhci, "tx error %d detected\n", comp_code);
 620                status = -comp_code;
 621                break;
 622        default:
 623                xhci_err(xhci, "unknown tx error %d\n", comp_code);
 624                status = -comp_code;
 625                break;
 626        }
 627
 628        /* Match the pending request: */
 629        list_for_each_entry(r, &dep->list_pending, list_pending) {
 630                if (r->trb_dma == event->trans_event.buffer) {
 631                        req = r;
 632                        break;
 633                }
 634        }
 635
 636        if (!req) {
 637                xhci_warn(xhci, "no matched request\n");
 638                return;
 639        }
 640
 641        trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
 642
 643        ring->num_trbs_free++;
 644        req->actual = req->length - remain_length;
 645        xhci_dbc_giveback(req, status);
 646}
 647
/*
 * One polling pass of the DbC state machine plus the event ring.
 *
 * Called with dbc->lock held from the event work.  First advances the
 * DbC state (DS_ENABLED -> DS_CONNECTED -> DS_CONFIGURED, handling
 * unplug, port reset and endpoint stall transitions), then, if the DbC
 * is running, consumes every pending TRB on the event ring and finally
 * updates the ERDP register once.
 *
 * Return values steer the caller: EVT_GSER (tty device should be
 * registered), EVT_DISC (tty device should be unregistered), EVT_DONE
 * (keep polling), EVT_ERR (stop polling).
 */
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
        dma_addr_t              deq;
        struct dbc_ep           *dep;
        union xhci_trb          *evt;
        u32                     ctrl, portsc;
        struct xhci_hcd         *xhci = dbc->xhci;
        bool                    update_erdp = false;

        /* DbC state machine: */
        switch (dbc->state) {
        case DS_DISABLED:
        case DS_INITIALIZED:

                return EVT_ERR;
        case DS_ENABLED:
                /* Wait for a host connection on the debug port. */
                portsc = readl(&dbc->regs->portsc);
                if (portsc & DBC_PORTSC_CONN_STATUS) {
                        dbc->state = DS_CONNECTED;
                        xhci_info(xhci, "DbC connected\n");
                }

                return EVT_DONE;
        case DS_CONNECTED:
                /* Wait for the controller to start running. */
                ctrl = readl(&dbc->regs->control);
                if (ctrl & DBC_CTRL_DBC_RUN) {
                        dbc->state = DS_CONFIGURED;
                        xhci_info(xhci, "DbC configured\n");
                        portsc = readl(&dbc->regs->portsc);
                        /* Ack all portsc change bits (write-1-to-clear). */
                        writel(portsc, &dbc->regs->portsc);
                        return EVT_GSER;
                }

                return EVT_DONE;
        case DS_CONFIGURED:
                /* Handle cable unplug event: */
                portsc = readl(&dbc->regs->portsc);
                if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                    !(portsc & DBC_PORTSC_CONN_STATUS)) {
                        xhci_info(xhci, "DbC cable unplugged\n");
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle debug port reset event: */
                if (portsc & DBC_PORTSC_RESET_CHANGE) {
                        xhci_info(xhci, "DbC port reset\n");
                        writel(portsc, &dbc->regs->portsc);
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle endpoint stall event: */
                ctrl = readl(&dbc->regs->control);
                if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
                        xhci_info(xhci, "DbC Endpoint stall\n");
                        dbc->state = DS_STALLED;

                        if (ctrl & DBC_CTRL_HALT_IN_TR) {
                                dep = get_in_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        if (ctrl & DBC_CTRL_HALT_OUT_TR) {
                                dep = get_out_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        return EVT_DONE;
                }

                /* Clear DbC run change bit: */
                if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
                        writel(ctrl, &dbc->regs->control);
                        ctrl = readl(&dbc->regs->control);
                }

                /* Fall out of the switch to process the event ring. */
                break;
        case DS_STALLED:
                /* Leave DS_STALLED once both halt bits clear and DbC runs. */
                ctrl = readl(&dbc->regs->control);
                if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
                    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
                    (ctrl & DBC_CTRL_DBC_RUN)) {
                        dbc->state = DS_CONFIGURED;
                        break;
                }

                return EVT_DONE;
        default:
                xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
                break;
        }

        /* Handle the events in the event ring: */
        evt = dbc->ring_evt->dequeue;
        while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
                        dbc->ring_evt->cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        dbc_handle_port_status(xhci, evt);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        dbc_handle_xfer_event(xhci, evt);
                        break;
                default:
                        break;
                }

                inc_deq(xhci, dbc->ring_evt);
                evt = dbc->ring_evt->dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer once, after the whole batch: */
        if (update_erdp) {
                deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                           dbc->ring_evt->dequeue);
                xhci_write_64(xhci, deq, &dbc->regs->erdp);
        }

        return EVT_DONE;
}
 783
/*
 * Delayed-work handler that polls the DbC.
 *
 * Runs xhci_dbc_do_handle_events() under the lock, then acts on its
 * verdict: registers the tty device (EVT_GSER), unregisters it
 * (EVT_DISC), or — on any unknown/error verdict — returns WITHOUT
 * rescheduling, which stops the polling loop.  Otherwise the work is
 * rearmed with a 1-jiffy delay.
 */
static void xhci_dbc_handle_events(struct work_struct *work)
{
        int                     ret;
        enum evtreturn          evtr;
        struct xhci_dbc         *dbc;
        unsigned long           flags;
        struct xhci_hcd         *xhci;

        dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
        xhci = dbc->xhci;

        spin_lock_irqsave(&dbc->lock, flags);
        evtr = xhci_dbc_do_handle_events(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        switch (evtr) {
        case EVT_GSER:
                ret = xhci_dbc_tty_register_device(xhci);
                if (ret) {
                        xhci_err(xhci, "failed to alloc tty device\n");
                        break;
                }

                xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
                break;
        case EVT_DISC:
                xhci_dbc_tty_unregister_device(xhci);
                break;
        case EVT_DONE:
                break;
        default:
                xhci_info(xhci, "stop handling dbc events\n");
                return;
        }

        mod_delayed_work(system_wq, &dbc->event_work, 1);
}
 821
 822static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
 823{
 824        unsigned long           flags;
 825
 826        spin_lock_irqsave(&xhci->lock, flags);
 827        kfree(xhci->dbc);
 828        xhci->dbc = NULL;
 829        spin_unlock_irqrestore(&xhci->lock, flags);
 830}
 831
/*
 * Locate the DbC extended capability, allocate and publish the dbc
 * structure.
 *
 * Returns -ENODEV when the controller has no debug capability, -EBUSY
 * when DbC is already enabled (e.g. in use by an early printk driver)
 * or another dbc structure was already published, -ENOMEM on
 * allocation failure, 0 on success.
 */
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
        u32                     reg;
        struct xhci_dbc         *dbc;
        unsigned long           flags;
        void __iomem            *base;
        int                     dbc_cap_offs;

        base = &xhci->cap_regs->hc_capbase;
        dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!dbc_cap_offs)
                return -ENODEV;

        dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
        if (!dbc)
                return -ENOMEM;

        dbc->regs = base + dbc_cap_offs;

        /* We will avoid using DbC in xhci driver if it's in use. */
        reg = readl(&dbc->regs->control);
        if (reg & DBC_CTRL_DBC_ENABLE) {
                kfree(dbc);
                return -EBUSY;
        }

        /* Publish xhci->dbc under the lock; lose the race gracefully. */
        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->dbc) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                kfree(dbc);
                return -EBUSY;
        }
        xhci->dbc = dbc;
        spin_unlock_irqrestore(&xhci->lock, flags);

        dbc->xhci = xhci;
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);

        return 0;
}
 873
 874static ssize_t dbc_show(struct device *dev,
 875                        struct device_attribute *attr,
 876                        char *buf)
 877{
 878        const char              *p;
 879        struct xhci_dbc         *dbc;
 880        struct xhci_hcd         *xhci;
 881
 882        xhci = hcd_to_xhci(dev_get_drvdata(dev));
 883        dbc = xhci->dbc;
 884
 885        switch (dbc->state) {
 886        case DS_DISABLED:
 887                p = "disabled";
 888                break;
 889        case DS_INITIALIZED:
 890                p = "initialized";
 891                break;
 892        case DS_ENABLED:
 893                p = "enabled";
 894                break;
 895        case DS_CONNECTED:
 896                p = "connected";
 897                break;
 898        case DS_CONFIGURED:
 899                p = "configured";
 900                break;
 901        case DS_STALLED:
 902                p = "stalled";
 903                break;
 904        default:
 905                p = "unknown";
 906        }
 907
 908        return sprintf(buf, "%s\n", p);
 909}
 910
 911static ssize_t dbc_store(struct device *dev,
 912                         struct device_attribute *attr,
 913                         const char *buf, size_t count)
 914{
 915        struct xhci_hcd         *xhci;
 916
 917        xhci = hcd_to_xhci(dev_get_drvdata(dev));
 918
 919        if (!strncmp(buf, "enable", 6))
 920                xhci_dbc_start(xhci);
 921        else if (!strncmp(buf, "disable", 7))
 922                xhci_dbc_stop(xhci);
 923        else
 924                return -EINVAL;
 925
 926        return count;
 927}
 928
/* sysfs "dbc" attribute: read the DbC state, write "enable"/"disable". */
static DEVICE_ATTR_RW(dbc);
 930
 931int xhci_dbc_init(struct xhci_hcd *xhci)
 932{
 933        int                     ret;
 934        struct device           *dev = xhci_to_hcd(xhci)->self.controller;
 935
 936        ret = xhci_do_dbc_init(xhci);
 937        if (ret)
 938                goto init_err3;
 939
 940        ret = xhci_dbc_tty_register_driver(xhci);
 941        if (ret)
 942                goto init_err2;
 943
 944        ret = device_create_file(dev, &dev_attr_dbc);
 945        if (ret)
 946                goto init_err1;
 947
 948        return 0;
 949
 950init_err1:
 951        xhci_dbc_tty_unregister_driver();
 952init_err2:
 953        xhci_do_dbc_exit(xhci);
 954init_err3:
 955        return ret;
 956}
 957
 958void xhci_dbc_exit(struct xhci_hcd *xhci)
 959{
 960        struct device           *dev = xhci_to_hcd(xhci)->self.controller;
 961
 962        if (!xhci->dbc)
 963                return;
 964
 965        device_remove_file(dev, &dev_attr_dbc);
 966        xhci_dbc_tty_unregister_driver();
 967        xhci_dbc_stop(xhci);
 968        xhci_do_dbc_exit(xhci);
 969}
 970
 971#ifdef CONFIG_PM
 972int xhci_dbc_suspend(struct xhci_hcd *xhci)
 973{
 974        struct xhci_dbc         *dbc = xhci->dbc;
 975
 976        if (!dbc)
 977                return 0;
 978
 979        if (dbc->state == DS_CONFIGURED)
 980                dbc->resume_required = 1;
 981
 982        xhci_dbc_stop(xhci);
 983
 984        return 0;
 985}
 986
 987int xhci_dbc_resume(struct xhci_hcd *xhci)
 988{
 989        int                     ret = 0;
 990        struct xhci_dbc         *dbc = xhci->dbc;
 991
 992        if (!dbc)
 993                return 0;
 994
 995        if (dbc->resume_required) {
 996                dbc->resume_required = 0;
 997                xhci_dbc_start(xhci);
 998        }
 999
1000        return ret;
1001}
1002#endif /* CONFIG_PM */
1003