linux/drivers/usb/early/xhci-dbc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbc.c - xHCI debug capability early driver
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/usb/xhci-dbgp.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define xdbc_trace      trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */

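/*
 * Map the xHCI MMIO space. The BAR is sized with the standard PCI
 * probe: write all ones, read back the size mask, then restore the
 * original value. A 64-bit memory BAR occupies two config dwords,
 * so its high half is probed the same way. early_ioremap() is used
 * because this runs long before the normal ioremap() machinery is
 * available.
 */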
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
        u64 val64, sz64, mask64;
        void __iomem *base;
        u32 val, sz;
        u8 byte;

        val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
        write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
        sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
        write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

        if (val == 0xffffffff || sz == 0xffffffff) {
                pr_notice("invalid mmio bar\n");
                return NULL;
        }

        val64   = val & PCI_BASE_ADDRESS_MEM_MASK;
        sz64    = sz & PCI_BASE_ADDRESS_MEM_MASK;
        mask64  = PCI_BASE_ADDRESS_MEM_MASK;

        if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
                val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
                write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
                sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
                write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

                val64   |= (u64)val << 32;
                sz64    |= (u64)sz << 32;
                mask64  |= ~0ULL << 32;
        }

        sz64 &= mask64;

        if (!sz64) {
                pr_notice("invalid mmio address\n");
                return NULL;
        }

        sz64 = 1ULL << __ffs64(sz64);

        /* Check if the mem space is enabled: */
        byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
        if (!(byte & PCI_COMMAND_MEMORY)) {
                byte |= PCI_COMMAND_MEMORY;
                write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
        }

        xdbc.xhci_start = val64;
        xdbc.xhci_length = sz64;
        base = early_ioremap(val64, sz64);

        return base;
}

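/*
 * Hand out one zeroed page from memblock. There is no DMA API this
 * early in boot; the physical address is passed to the hardware
 * directly, which assumes a 1:1 physical-to-bus mapping (and hence
 * no IOMMU translation for this device).
 */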
static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
        void *virt;

        virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!virt)
                return NULL;

        if (dma_addr)
                *dma_addr = (dma_addr_t)__pa(virt);

        return virt;
}

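/*
 * Walk the PCI buses by brute-force config reads and pick the
 * xdbc_num'th controller with the xHCI class code. On success the
 * bus/device/function numbers are returned through b/d/f.
 */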
static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
        u32 bus, dev, func, class;

        for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
                for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
                        for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

                                class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
                                if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
                                        continue;

                                if (xdbc_num-- != 0)
                                        continue;

                                *b = bus;
                                *d = dev;
                                *f = func;

                                return 0;
                        }
                }
        }

        return -1;
}

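/* Poll a register until (value & mask) == done or "wait" microseconds pass. */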
static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
        u32 result;

        return readl_poll_timeout_atomic(ptr, result,
                                         ((result & mask) == done),
                                         delay, wait);
}

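/*
 * Take the controller away from the BIOS using the USB legacy
 * support capability: set the OS-owned semaphore and wait for the
 * BIOS-owned bit to clear. If the BIOS never lets go, ownership is
 * simply forced by clearing its bit.
 */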
static void __init xdbc_bios_handoff(void)
{
        int offset, timeout;
        u32 val;

        offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
        val = readl(xdbc.xhci_base + offset);

        if (val & XHCI_HC_BIOS_OWNED) {
                writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
                timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

                if (timeout) {
                        pr_notice("failed to hand over xHCI control from BIOS\n");
                        writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
                }
        }

        /* Disable BIOS SMIs and clear all SMI events: */
        val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
        val &= XHCI_LEGACY_DISABLE_SMI;
        val |= XHCI_LEGACY_SMI_EVENTS;
        writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}

static int __init
xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
        seg->trbs = xdbc_get_page(&seg->dma);
        if (!seg->trbs)
                return -ENOMEM;

        ring->segment = seg;

        return 0;
}

static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
        struct xdbc_segment *seg = ring->segment;

        if (!seg)
                return;

        memblock_free(seg->dma, PAGE_SIZE);
        ring->segment = NULL;
}

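/*
 * Return a ring to its initial state: zeroed segment, enqueue and
 * dequeue at the first TRB, producer cycle state 1. The transfer
 * rings get a link TRB in the last slot that points back to the
 * start of the segment and toggles the cycle bit; the event ring is
 * only consumed by software, so it needs no link TRB.
 */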
static void xdbc_reset_ring(struct xdbc_ring *ring)
{
        struct xdbc_segment *seg = ring->segment;
        struct xdbc_trb *link_trb;

        memset(seg->trbs, 0, PAGE_SIZE);

        ring->enqueue = seg->trbs;
        ring->dequeue = seg->trbs;
        ring->cycle_state = 1;

        if (ring != &xdbc.evt_ring) {
                link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
                link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
                link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
                link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
        }
}

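/*
 * Widen an ASCII string to the UTF-16LE used by USB string
 * descriptors. This is a plain zero extension, so it is only
 * correct for ASCII input, which is all the fixed strings here
 * contain.
 */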
static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                s[i] = cpu_to_le16(c[i]);
}

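/*
 * Lay out the DbC data structures and tell the hardware where they
 * are. The single table page is carved into the event ring segment
 * table, the debug capability contexts (info, bulk-out, bulk-in)
 * and the four string descriptors; a second page is split between
 * the OUT and IN bounce buffers.
 */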
static void xdbc_mem_init(void)
{
        struct xdbc_ep_context *ep_in, *ep_out;
        struct usb_string_descriptor *s_desc;
        struct xdbc_erst_entry *entry;
        struct xdbc_strings *strings;
        struct xdbc_context *ctx;
        unsigned int max_burst;
        u32 string_length;
        int index = 0;
        u32 dev_info;

        xdbc_reset_ring(&xdbc.evt_ring);
        xdbc_reset_ring(&xdbc.in_ring);
        xdbc_reset_ring(&xdbc.out_ring);
        memset(xdbc.table_base, 0, PAGE_SIZE);
        memset(xdbc.out_buf, 0, PAGE_SIZE);

        /* Initialize event ring segment table: */
        xdbc.erst_size  = 16;
        xdbc.erst_base  = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
        xdbc.erst_dma   = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

        index += XDBC_ERST_ENTRY_NUM;
        entry = (struct xdbc_erst_entry *)xdbc.erst_base;

        entry->seg_addr         = cpu_to_le64(xdbc.evt_seg.dma);
        entry->seg_size         = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
        entry->__reserved_0     = 0;

        /* Initialize ERST registers: */
        writel(1, &xdbc.xdbc_reg->ersts);
        xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
        xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

        /* Debug capability contexts: */
        xdbc.dbcc_size  = 64 * 3;
        xdbc.dbcc_base  = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
        xdbc.dbcc_dma   = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

        index += XDBC_DBCC_ENTRY_NUM;

        /* Populate the strings: */
        xdbc.string_size = sizeof(struct xdbc_strings);
        xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
        xdbc.string_dma  = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
        strings          = (struct xdbc_strings *)xdbc.string_base;

        index += XDBC_STRING_ENTRY_NUM;

        /* Serial string: */
        s_desc                  = (struct usb_string_descriptor *)strings->serial;
        s_desc->bLength         = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;

        xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
        string_length = s_desc->bLength;
        string_length <<= 8;

        /* Product string: */
        s_desc                  = (struct usb_string_descriptor *)strings->product;
        s_desc->bLength         = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;

        xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
        string_length += s_desc->bLength;
        string_length <<= 8;

        /* Manufacturer string: */
        s_desc                  = (struct usb_string_descriptor *)strings->manufacturer;
        s_desc->bLength         = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;

        xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
        string_length += s_desc->bLength;
        string_length <<= 8;

        /* String0: */
        strings->string0[0]     = 4;
        strings->string0[1]     = USB_DT_STRING;
        strings->string0[2]     = 0x09;
        strings->string0[3]     = 0x04;

        string_length += 4;

        /* Populate info context: */
        ctx = (struct xdbc_context *)xdbc.dbcc_base;

        ctx->info.string0       = cpu_to_le64(xdbc.string_dma);
        ctx->info.manufacturer  = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
        ctx->info.product       = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
        ctx->info.serial        = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
        ctx->info.length        = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
        ep_out = (struct xdbc_ep_context *)&ctx->out;

        ep_out->ep_info1        = 0;
        ep_out->ep_info2        = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
        ep_out->deq             = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

        /* Populate bulk in endpoint context: */
        ep_in = (struct xdbc_ep_context *)&ctx->in;

        ep_in->ep_info1         = 0;
        ep_in->ep_info2         = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
        ep_in->deq              = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

        /* Set DbC context and info registers: */
        xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

        dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
        writel(dev_info, &xdbc.xdbc_reg->devinfo1);

        dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
        writel(dev_info, &xdbc.xdbc_reg->devinfo2);

        xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
        xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}

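/*
 * Reset every unconnected USB3 port in the given (1-based) range.
 * PORTSC registers start at offset 0x400 from the operational base
 * and are 0x10 bytes apart. This is used as an Intel-specific
 * workaround (see xdbc_start()) so the debug port does not hang the
 * bus before a cable is plugged in.
 */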
static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
        void __iomem *ops_reg;
        void __iomem *portsc;
        u32 val, cap_length;
        int i;

        cap_length = readl(xdbc.xhci_base) & 0xff;
        ops_reg = xdbc.xhci_base + cap_length;

        id--;
        for (i = id; i < (id + count); i++) {
                portsc = ops_reg + 0x400 + i * 0x10;
                val = readl(portsc);
                if (!(val & PORT_CONNECT))
                        writel(val | PORT_RESET, portsc);
        }
}

static void xdbc_reset_debug_port(void)
{
        u32 val, port_offset, port_count;
        int offset = 0;

        do {
                offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
                if (!offset)
                        break;

                val = readl(xdbc.xhci_base + offset);
                if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
                        continue;

                val = readl(xdbc.xhci_base + offset + 8);
                port_offset = XHCI_EXT_PORT_OFF(val);
                port_count = XHCI_EXT_PORT_COUNT(val);

                xdbc_do_reset_debug_port(port_offset, port_count);
        } while (1);
}

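/*
 * Write one TRB at the enqueue slot and advance. When the enqueue
 * pointer reaches the link TRB at the end of the segment, hand that
 * link TRB to the hardware by giving it the current cycle bit, then
 * wrap to the start of the segment and toggle the producer cycle
 * state.
 */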
static void
xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
        struct xdbc_trb *trb, *link_trb;

        trb = ring->enqueue;
        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
        trb->field[3] = cpu_to_le32(field4);

        ++(ring->enqueue);
        if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
                link_trb = ring->enqueue;
                if (ring->cycle_state)
                        link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
                else
                        link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

                ring->enqueue = ring->segment->trbs;
                ring->cycle_state ^= 1;
        }
}

static void xdbc_ring_doorbell(int target)
{
        writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}

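/*
 * Bring the DbC online: set the enable and port-enable bits, wait
 * for them to latch, then wait up to five seconds each for a cable
 * connection and for the host to configure the debug device, and
 * finally record which root hub port we came up on.
 */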
static int xdbc_start(void)
{
        u32 ctrl, status;
        int ret;

        ctrl = readl(&xdbc.xdbc_reg->control);
        writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
        if (ret) {
                xdbc_trace("failed to initialize hardware\n");
                return ret;
        }

        /* Reset port to avoid bus hang: */
        if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
                xdbc_reset_debug_port();

        /* Wait for port connection: */
        ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
        if (ret) {
                xdbc_trace("waiting for connection timed out\n");
                return ret;
        }

        /* Wait for debug device to be configured: */
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
        if (ret) {
                xdbc_trace("waiting for device configuration timed out\n");
                return ret;
        }

        /* Check port number: */
        status = readl(&xdbc.xdbc_reg->status);
        if (!DCST_DEBUG_PORT(status)) {
                xdbc_trace("invalid root hub port number\n");
                return -ENODEV;
        }

        xdbc.port_number = DCST_DEBUG_PORT(status);

        xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
                   readl(&xdbc.xdbc_reg->control), xdbc.port_number);

        return 0;
}

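/*
 * Queue a single bulk TRB on the IN or OUT ring. The TRB is first
 * written with an inverted cycle bit so the controller ignores it;
 * only after the write barrier is the cycle bit flipped to the live
 * value and the doorbell rung. Data always goes through the
 * dedicated in_buf/out_buf bounce pages.
 */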
static int xdbc_bulk_transfer(void *data, int size, bool read)
{
        struct xdbc_ring *ring;
        struct xdbc_trb *trb;
        u32 length, control;
        u32 cycle;
        u64 addr;

        if (size > XDBC_MAX_PACKET) {
                xdbc_trace("bad parameter, size %d\n", size);
                return -EINVAL;
        }

        if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
            !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
            (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
            (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

                xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
                return -EIO;
        }

        ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
        trb = ring->enqueue;
        cycle = ring->cycle_state;
        length = TRB_LEN(size);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        if (cycle)
                control &= cpu_to_le32(~TRB_CYCLE);
        else
                control |= cpu_to_le32(TRB_CYCLE);

        if (read) {
                memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
                addr = xdbc.in_dma;
                xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
        } else {
                memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
                memcpy(xdbc.out_buf, data, size);
                addr = xdbc.out_dma;
                xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
        }

        xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * the cycle bit:
         */
        wmb();
        if (cycle)
                trb->field[3] |= cpu_to_le32(cycle);
        else
                trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

        xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);

        return size;
}

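/*
 * Recover after someone else reset the controller underneath us:
 * clear all state, rebuild the rings and contexts, and restart the
 * DbC from scratch.
 */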
static int xdbc_handle_external_reset(void)
{
        int ret = 0;

        xdbc.flags = 0;
        writel(0, &xdbc.xdbc_reg->control);
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
        if (ret)
                goto reset_out;

        xdbc_mem_init();

        ret = xdbc_start();
        if (ret < 0)
                goto reset_out;

        xdbc_trace("dbc recovered\n");

        xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

        xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

        return 0;

reset_out:
        xdbc_trace("failed to recover from external reset\n");
        return ret;
}

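/*
 * One-time early bring-up: make sure the DbC is disabled, allocate
 * the table and transfer-buffer pages plus the three rings, program
 * the hardware and start it. A first IN transfer is queued at the
 * end so the host always has a buffer to write into.
 */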
static int __init xdbc_early_setup(void)
{
        int ret;

        writel(0, &xdbc.xdbc_reg->control);
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
        if (ret)
                return ret;

        /* Allocate the table page: */
        xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
        if (!xdbc.table_base)
                return -ENOMEM;

        /* Get and store the transfer buffer: */
        xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
        if (!xdbc.out_buf)
                return -ENOMEM;

        /* Allocate the event ring: */
        ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
        if (ret < 0)
                return ret;

        /* Allocate IN/OUT endpoint transfer rings: */
        ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
        if (ret < 0)
                return ret;

        ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
        if (ret < 0)
                return ret;

        xdbc_mem_init();

        ret = xdbc_start();
        if (ret < 0) {
                writel(0, &xdbc.xdbc_reg->control);
                return ret;
        }

        xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

        xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

        return 0;
}

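/*
 * Parse the early console parameter: an optional controller index
 * (in PCI scan order, default 0) plus an optional "keep" that makes
 * the console survive past early boot. Locates the controller, maps
 * its MMIO space and finds the debug extended capability.
 */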
int __init early_xdbc_parse_parameter(char *s)
{
        unsigned long dbgp_num = 0;
        u32 bus, dev, func, offset;
        int ret;

        if (!early_pci_allowed())
                return -EPERM;

        if (strstr(s, "keep"))
                early_console_keep = true;

        if (xdbc.xdbc_reg)
                return 0;

        if (*s && kstrtoul(s, 0, &dbgp_num))
                dbgp_num = 0;

        pr_notice("dbgp_num: %lu\n", dbgp_num);

        /* Locate the host controller: */
        ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
        if (ret) {
                pr_notice("failed to locate xhci host\n");
                return -ENODEV;
        }

        xdbc.vendor     = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
        xdbc.device     = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
        xdbc.bus        = bus;
        xdbc.dev        = dev;
        xdbc.func       = func;

        /* Map the IO memory: */
        xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
        if (!xdbc.xhci_base)
                return -EINVAL;

        /* Locate DbC registers: */
        offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!offset) {
                pr_notice("xhci host doesn't support debug capability\n");
                early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
                xdbc.xhci_base = NULL;
                xdbc.xhci_length = 0;

                return -ENODEV;
        }
        xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

        return 0;
}

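/*
 * Second setup stage: take the controller away from the BIOS and
 * run the early setup. On failure, every early allocation is handed
 * back to memblock.
 */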
int __init early_xdbc_setup_hardware(void)
{
        int ret;

        if (!xdbc.xdbc_reg)
                return -ENODEV;

        xdbc_bios_handoff();

        raw_spin_lock_init(&xdbc.lock);

        ret = xdbc_early_setup();
        if (ret) {
                pr_notice("failed to setup the connection to host\n");

                xdbc_free_ring(&xdbc.evt_ring);
                xdbc_free_ring(&xdbc.out_ring);
                xdbc_free_ring(&xdbc.in_ring);

                if (xdbc.table_dma)
                        memblock_free(xdbc.table_dma, PAGE_SIZE);

                if (xdbc.out_dma)
                        memblock_free(xdbc.out_dma, PAGE_SIZE);

                xdbc.table_base = NULL;
                xdbc.out_buf = NULL;
        }

        return ret;
}

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
        u32 port_reg;

        port_reg = readl(&xdbc.xdbc_reg->portsc);
        if (port_reg & PORTSC_CONN_CHANGE) {
                xdbc_trace("connect status change event\n");

                /* Check whether cable unplugged: */
                if (!(port_reg & PORTSC_CONN_STATUS)) {
                        xdbc.flags = 0;
                        xdbc_trace("cable unplugged\n");
                }
        }

        if (port_reg & PORTSC_RESET_CHANGE)
                xdbc_trace("port reset change event\n");

        if (port_reg & PORTSC_LINK_CHANGE)
                xdbc_trace("port link status change event\n");

        if (port_reg & PORTSC_CONFIG_CHANGE)
                xdbc_trace("config error change\n");

        /* Write back the value to clear RW1C bits: */
        writel(port_reg, &xdbc.xdbc_reg->portsc);
}

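/*
 * Handle a transfer event. Any completion code other than success
 * or short packet marks the endpoint as stalled. A completed IN
 * transfer immediately requeues a fresh IN request so there is
 * always a buffer posted for host-to-device data.
 */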
static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
        u32 comp_code;
        int ep_id;

        comp_code       = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
        ep_id           = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

        switch (comp_code) {
        case COMP_SUCCESS:
        case COMP_SHORT_PACKET:
                break;
        case COMP_TRB_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:
        default:
                if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
                        xdbc.flags |= XDBC_FLAGS_OUT_STALL;
                if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
                        xdbc.flags |= XDBC_FLAGS_IN_STALL;

                xdbc_trace("endpoint %d stalled\n", ep_id);
                break;
        }

        if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
                xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
                xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
        } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
                xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
        } else {
                xdbc_trace("invalid endpoint id %d\n", ep_id);
        }
}

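/*
 * Poll-mode event handling, called with xdbc.lock held. First
 * re-enable PCI bus mastering in case firmware or a kexec'd kernel
 * turned it off, then recover from external resets, mirror the
 * run/halt control bits into xdbc.flags, and finally walk the event
 * ring until the cycle bit stops matching, writing back the new
 * dequeue pointer if anything was consumed.
 */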
static void xdbc_handle_events(void)
{
        struct xdbc_trb *evt_trb;
        bool update_erdp = false;
        u32 reg;
        u8 cmd;

        cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
        if (!(cmd & PCI_COMMAND_MASTER)) {
                cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
                write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
        }

        if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
                return;

        /* Handle external reset events: */
        reg = readl(&xdbc.xdbc_reg->control);
        if (!(reg & CTRL_DBC_ENABLE)) {
                if (xdbc_handle_external_reset()) {
                        xdbc_trace("failed to recover connection\n");
                        return;
                }
        }

        /* Handle configure-exit event: */
        reg = readl(&xdbc.xdbc_reg->control);
        if (reg & CTRL_DBC_RUN_CHANGE) {
                writel(reg, &xdbc.xdbc_reg->control);
                if (reg & CTRL_DBC_RUN)
                        xdbc.flags |= XDBC_FLAGS_CONFIGURED;
                else
                        xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
        }

        /* Handle endpoint stall event: */
        reg = readl(&xdbc.xdbc_reg->control);
        if (reg & CTRL_HALT_IN_TR) {
                xdbc.flags |= XDBC_FLAGS_IN_STALL;
        } else {
                xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
                if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
                        xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
        }

        if (reg & CTRL_HALT_OUT_TR)
                xdbc.flags |= XDBC_FLAGS_OUT_STALL;
        else
                xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

        /* Handle the events in the event ring: */
        evt_trb = xdbc.evt_ring.dequeue;
        while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        xdbc_handle_port_status(evt_trb);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        xdbc_handle_tx_event(evt_trb);
                        break;
                default:
                        break;
                }

                ++(xdbc.evt_ring.dequeue);
                if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
                        xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
                        xdbc.evt_ring.cycle_state ^= 1;
                }

                evt_trb = xdbc.evt_ring.dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer: */
        if (update_erdp)
                xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}

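/*
 * Send one chunk out of the bulk OUT endpoint. From NMI context the
 * lock is only trylocked, so a printk from NMI cannot deadlock
 * against an interrupted lock holder. Since there is no interrupt
 * handler, completion of the previous transfer is polled for up to
 * two seconds before the message is dropped.
 */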
static int xdbc_bulk_write(const char *bytes, int size)
{
        int ret, timeout = 0;
        unsigned long flags;

retry:
        if (in_nmi()) {
                if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
                        return -EAGAIN;
        } else {
                raw_spin_lock_irqsave(&xdbc.lock, flags);
        }

        xdbc_handle_events();

        /* Check completion of the previous request: */
        if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
                raw_spin_unlock_irqrestore(&xdbc.lock, flags);
                udelay(100);
                timeout += 100;
                goto retry;
        }

        if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
                raw_spin_unlock_irqrestore(&xdbc.lock, flags);
                xdbc_trace("previous transfer not completed yet\n");

                return -ETIMEDOUT;
        }

        ret = xdbc_bulk_transfer((void *)bytes, size, false);
        raw_spin_unlock_irqrestore(&xdbc.lock, flags);

        return ret;
}

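/*
 * Console write callback: cut the message into XDBC_MAX_PACKET
 * sized chunks and expand every '\n' into "\r\n". The CR is emitted
 * first and the LF replayed on the next loop iteration via the
 * str--/n++ dance below.
 */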
static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
        static char buf[XDBC_MAX_PACKET];
        int chunk, ret;
        int use_cr = 0;

        if (!xdbc.xdbc_reg)
                return;
        memset(buf, 0, XDBC_MAX_PACKET);
        while (n > 0) {
                for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {

                        if (!use_cr && *str == '\n') {
                                use_cr = 1;
                                buf[chunk] = '\r';
                                str--;
                                n++;
                                continue;
                        }

                        if (use_cr)
                                use_cr = 0;
                        buf[chunk] = *str;
                }

                if (chunk > 0) {
                        ret = xdbc_bulk_write(buf, chunk);
                        if (ret < 0)
                                xdbc_trace("missed message {%s}\n", buf);
                }
        }
}

static struct console early_xdbc_console = {
        .name           = "earlyxdbc",
        .write          = early_xdbc_write,
        .flags          = CON_PRINTBUFFER,
        .index          = -1,
};

void __init early_xdbc_register_console(void)
{
        if (early_console)
                return;

        early_console = &early_xdbc_console;
        if (early_console_keep)
                early_console->flags &= ~CON_BOOT;
        else
                early_console->flags |= CON_BOOT;
        register_console(early_console);
}

static void xdbc_unregister_console(void)
{
        if (early_xdbc_console.flags & CON_ENABLED)
                unregister_console(&early_xdbc_console);
}

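/*
 * Kernel thread that polls for events once the kept console runs
 * past early boot; the DbC has no interrupt wired up here. When the
 * connection is torn down (e.g. a cable unplug clears the flags),
 * the console is unregistered and the controller disabled.
 */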
static int xdbc_scrub_function(void *ptr)
{
        unsigned long flags;

        while (true) {
                raw_spin_lock_irqsave(&xdbc.lock, flags);
                xdbc_handle_events();

                if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
                        raw_spin_unlock_irqrestore(&xdbc.lock, flags);
                        break;
                }

                raw_spin_unlock_irqrestore(&xdbc.lock, flags);
                schedule_timeout_interruptible(1);
        }

        xdbc_unregister_console();
        writel(0, &xdbc.xdbc_reg->control);
        xdbc_trace("dbc scrub function exits\n");

        return 0;
}

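/*
 * Late initcall: if the early console was never registered or is a
 * plain boot console, shut the DbC down and free its memory so the
 * regular xHCI driver can claim the port. If "keep" was requested,
 * swap the early_ioremap() mapping for a permanent ioremap() and
 * start the polling thread.
 */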
static int __init xdbc_init(void)
{
        unsigned long flags;
        void __iomem *base;
        int ret = 0;
        u32 offset;

        if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
                return 0;

        /*
         * It's time to shut down the DbC, so that the debug
         * port can be reused by the host controller:
         */
        if (early_xdbc_console.index == -1 ||
            (early_xdbc_console.flags & CON_BOOT)) {
                xdbc_trace("hardware not used anymore\n");
                goto free_and_quit;
        }

        base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
        if (!base) {
                xdbc_trace("failed to remap the io address\n");
                ret = -ENOMEM;
                goto free_and_quit;
        }

        raw_spin_lock_irqsave(&xdbc.lock, flags);
        early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
        xdbc.xhci_base = base;
        offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
        xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
        raw_spin_unlock_irqrestore(&xdbc.lock, flags);

        kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

        return 0;

free_and_quit:
        xdbc_free_ring(&xdbc.evt_ring);
        xdbc_free_ring(&xdbc.out_ring);
        xdbc_free_ring(&xdbc.in_ring);
        memblock_free(xdbc.table_dma, PAGE_SIZE);
        memblock_free(xdbc.out_dma, PAGE_SIZE);
        writel(0, &xdbc.xdbc_reg->control);
        early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

        return ret;
}
subsys_initcall(xdbc_init);