linux/drivers/usb/host/xhci-dbg.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "xhci.h"

#define XHCI_INIT_VALUE 0x0
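/*
 * Registers and reserved fields are expected to read back as
 * XHCI_INIT_VALUE (all zeroes) until they have been initialized; several
 * helpers below use this to skip untouched register sets or to warn
 * about unexpectedly non-zero reserved fields.
 */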

/* Add verbose debugging later, just print everything for now */

void xhci_dbg_regs(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
			xhci->cap_regs);
	temp = readl(&xhci->cap_regs->hc_capbase);
	xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
			&xhci->cap_regs->hc_capbase, temp);
	xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
			(unsigned int) HC_LENGTH(temp));
	xhci_dbg(xhci, "//   HCIVERSION: 0x%x\n",
			(unsigned int) HC_VERSION(temp));

	xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);

	temp = readl(&xhci->cap_regs->run_regs_off);
	xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
			&xhci->cap_regs->run_regs_off,
			(unsigned int) temp & RTSOFF_MASK);
	xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);

	temp = readl(&xhci->cap_regs->db_off);
	xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
}

static void xhci_print_cap_regs(struct xhci_hcd *xhci)
{
	u32 temp;
	u32 hci_version;

	xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);

	temp = readl(&xhci->cap_regs->hc_capbase);
	hci_version = HC_VERSION(temp);
	xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
			(unsigned int) temp);
	xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
			(unsigned int) HC_LENGTH(temp));
	xhci_dbg(xhci, "HCIVERSION: 0x%x\n", hci_version);

	temp = readl(&xhci->cap_regs->hcs_params1);
	xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
			(unsigned int) temp);
	xhci_dbg(xhci, "  Max device slots: %u\n",
			(unsigned int) HCS_MAX_SLOTS(temp));
	xhci_dbg(xhci, "  Max interrupters: %u\n",
			(unsigned int) HCS_MAX_INTRS(temp));
	xhci_dbg(xhci, "  Max ports: %u\n",
			(unsigned int) HCS_MAX_PORTS(temp));

	temp = readl(&xhci->cap_regs->hcs_params2);
	xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
			(unsigned int) temp);
	xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
			(unsigned int) HCS_IST(temp));
	xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
			(unsigned int) HCS_ERST_MAX(temp));

	temp = readl(&xhci->cap_regs->hcs_params3);
	xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
			(unsigned int) temp);
	xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
			(unsigned int) HCS_U1_LATENCY(temp));
	xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
			(unsigned int) HCS_U2_LATENCY(temp));

	temp = readl(&xhci->cap_regs->hcc_params);
	xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
	xhci_dbg(xhci, "  HC generates %s bit addresses\n",
			HCC_64BIT_ADDR(temp) ? "64" : "32");
	xhci_dbg(xhci, "  HC %s Contiguous Frame ID Capability\n",
			HCC_CFC(temp) ? "has" : "doesn't have");
	xhci_dbg(xhci, "  HC %s generate Stopped - Short Packet event\n",
			HCC_SPC(temp) ? "can" : "can't");
	/* FIXME */
	xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");

	temp = readl(&xhci->cap_regs->run_regs_off);
	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);

	/* xhci 1.1 controllers have the HCCPARAMS2 register */
	if (hci_version > 0x100) {
		temp = readl(&xhci->cap_regs->hcc_params2);
		xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
		xhci_dbg(xhci, "  HC %s Force save context capability\n",
			 HCC2_FSC(temp) ? "supports" : "doesn't support");
		xhci_dbg(xhci, "  HC %s Large ESIT Payload Capability\n",
			 HCC2_LEC(temp) ? "supports" : "doesn't support");
		xhci_dbg(xhci, "  HC %s Extended TBC capability\n",
			 HCC2_ETC(temp) ? "supports" : "doesn't support");
	}
}

static void xhci_print_command_reg(struct xhci_hcd *xhci)
{
	u32 temp;

	temp = readl(&xhci->op_regs->command);
	xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
	xhci_dbg(xhci, "  HC is %s\n",
			(temp & CMD_RUN) ? "running" : "being stopped");
	xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
			(temp & CMD_RESET) ? "not " : "");
	xhci_dbg(xhci, "  Event Interrupts %s\n",
			(temp & CMD_EIE) ? "enabled" : "disabled");
	xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
			(temp & CMD_HSEIE) ? "enabled" : "disabled");
	xhci_dbg(xhci, "  HC has %sfinished light reset\n",
			(temp & CMD_LRESET) ? "not " : "");
}

static void xhci_print_status(struct xhci_hcd *xhci)
{
	u32 temp;

	temp = readl(&xhci->op_regs->status);
	xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
	xhci_dbg(xhci, "  Event ring is %sempty\n",
			(temp & STS_EINT) ? "not " : "");
	xhci_dbg(xhci, "  %sHost System Error\n",
			(temp & STS_FATAL) ? "WARNING: " : "No ");
	xhci_dbg(xhci, "  HC is %s\n",
			(temp & STS_HALT) ? "halted" : "running");
}

static void xhci_print_op_regs(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
	xhci_print_command_reg(xhci);
	xhci_print_status(xhci);
}

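/*
 * Dump the per-port register sets.  Each root hub port has four 32-bit
 * registers (PORTSC, PORTPMSC, PORTLI and a fourth slot that is reserved
 * in xHCI 1.0), which is what the names[] table below reflects.
 */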
static void xhci_print_ports(struct xhci_hcd *xhci)
{
	__le32 __iomem *addr;
	int i, j;
	int ports;
	char *names[NUM_PORT_REGS] = {
		"status",
		"power",
		"link",
		"reserved",
	};

	ports = HCS_MAX_PORTS(xhci->hcs_params1);
	addr = &xhci->op_regs->port_status_base;
	for (i = 0; i < ports; i++) {
		for (j = 0; j < NUM_PORT_REGS; j++) {
			xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
					addr, names[j],
					(unsigned int) readl(addr));
			addr++;
		}
	}
}

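/*
 * Print one interrupter register set.  A set whose IMAN (irq_pending)
 * register still reads back as XHCI_INIT_VALUE has not been set up yet,
 * so it is skipped entirely.
 */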
void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
	struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
	void __iomem *addr;
	u32 temp;
	u64 temp_64;

	addr = &ir_set->irq_pending;
	temp = readl(addr);
	if (temp == XHCI_INIT_VALUE)
		return;

	xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);

	xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
			(unsigned int)temp);

	addr = &ir_set->irq_control;
	temp = readl(addr);
	xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
			(unsigned int)temp);

	addr = &ir_set->erst_size;
	temp = readl(addr);
	xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
			(unsigned int)temp);

	addr = &ir_set->rsvd;
	temp = readl(addr);
	if (temp != XHCI_INIT_VALUE)
		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
				addr, (unsigned int)temp);

	addr = &ir_set->erst_base;
	temp_64 = xhci_read_64(xhci, addr);
	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
			addr, temp_64);

	addr = &ir_set->erst_dequeue;
	temp_64 = xhci_read_64(xhci, addr);
	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
			addr, temp_64);
}

void xhci_print_run_regs(struct xhci_hcd *xhci)
{
	u32 temp;
	int i;

	xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
	temp = readl(&xhci->run_regs->microframe_index);
	xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
			&xhci->run_regs->microframe_index,
			(unsigned int) temp);
	for (i = 0; i < 7; i++) {
		temp = readl(&xhci->run_regs->rsvd[i]);
		if (temp != XHCI_INIT_VALUE)
			xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
					&xhci->run_regs->rsvd[i],
					i, (unsigned int) temp);
	}
}

void xhci_print_registers(struct xhci_hcd *xhci)
{
	xhci_print_cap_regs(xhci);
	xhci_print_op_regs(xhci);
	xhci_print_ports(xhci);
}

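/*
 * A TRB is four 32-bit fields, so this dumps the raw contents at byte
 * offsets 0x0, 0x4, 0x8 and 0xc.
 */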
void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
{
	int i;
	for (i = 0; i < 4; i++)
		xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
				i*4, trb->generic.field[i]);
}

/**
 * Debug a transfer request block (TRB).
 */
void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
{
	u64	address;
	u32	type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;

	switch (type) {
	case TRB_TYPE(TRB_LINK):
		xhci_dbg(xhci, "Link TRB:\n");
		xhci_print_trb_offsets(xhci, trb);

		address = le64_to_cpu(trb->link.segment_ptr);
		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);

		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
			 GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
		xhci_dbg(xhci, "Cycle bit = %u\n",
			 le32_to_cpu(trb->link.control) & TRB_CYCLE);
		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
			 le32_to_cpu(trb->link.control) & LINK_TOGGLE);
		xhci_dbg(xhci, "No Snoop bit = %u\n",
			 le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
		break;
	case TRB_TYPE(TRB_TRANSFER):
		address = le64_to_cpu(trb->trans_event.buffer);
		/*
		 * FIXME: look at flags to figure out if it's an address or if
		 * the data is directly in the buffer field.
		 */
		xhci_dbg(xhci, "DMA address or buffer contents = %llu\n", address);
		break;
	case TRB_TYPE(TRB_COMPLETION):
		address = le64_to_cpu(trb->event_cmd.cmd_trb);
		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
		xhci_dbg(xhci, "Completion status = %u\n",
			 GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
		xhci_dbg(xhci, "Flags = 0x%x\n",
			 le32_to_cpu(trb->event_cmd.flags));
		break;
	default:
		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
				(unsigned int) type >> 10);
		xhci_print_trb_offsets(xhci, trb);
		break;
	}
}

/**
 * Debug a segment within an xHCI ring.
 *
 * Prints out all TRBs in the segment, even those after the Link TRB.
 * (Every segment must end with a Link TRB; a segment without one is a bug.)
 *
 * XXX: should we print out TRBs that the HC owns?  As long as we don't
 * write, that should be fine...  We shouldn't expect that the memory pointed to
 * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
 * for HC debugging.
 */
void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	int i;
	u64 addr = seg->dma;
	union xhci_trb *trb = seg->trbs;

	for (i = 0; i < TRBS_PER_SEGMENT; i++) {
		trb = &seg->trbs[i];
		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
			 lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
			 upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
			 le32_to_cpu(trb->link.intr_target),
			 le32_to_cpu(trb->link.control));
		addr += sizeof(*trb);
	}
}

void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
			ring->dequeue,
			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
								 ring->dequeue));
	xhci_dbg(xhci, "Ring deq updated %u times\n",
			ring->deq_updates);
	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
			ring->enqueue,
			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
								 ring->enqueue));
	xhci_dbg(xhci, "Ring enq updated %u times\n",
			ring->enq_updates);
}

/**
 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
 *
 * Print out each segment in the ring.  Check that the DMA address in
 * each link segment actually matches the segment's stored DMA address.
 * Check that the link end bit is only set at the end of the ring.
 * Check that the dequeue and enqueue pointers point to real data in this ring
 * (not some other ring).
 */
void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	/* FIXME: Throw an error if any segment doesn't have a Link TRB */
	struct xhci_segment *seg;
	struct xhci_segment *first_seg = ring->first_seg;
	xhci_debug_segment(xhci, first_seg);

	if (!ring->enq_updates && !ring->deq_updates) {
		xhci_dbg(xhci, "  Ring has not been updated\n");
		return;
	}
	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
		xhci_debug_segment(xhci, seg);
}

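/*
 * Stream ID 0 is reserved by the xHCI specification, so the per-stream
 * loop below starts at stream ID 1.
 */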
void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_virt_ep *ep)
{
	int i;
	struct xhci_ring *ring;

	if (ep->ep_state & EP_HAS_STREAMS) {
		for (i = 1; i < ep->stream_info->num_streams; i++) {
			ring = ep->stream_info->stream_rings[i];
			xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
				slot_id, ep_index, i);
			xhci_debug_segment(xhci, ring->deq_seg);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
				slot_id, ep_index);
		xhci_debug_segment(xhci, ring->deq_seg);
	}
}

void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	u64 addr = erst->erst_dma_addr;
	int i;
	struct xhci_erst_entry *entry;

	for (i = 0; i < erst->num_entries; i++) {
		entry = &erst->entries[i];
		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
			 addr,
			 lower_32_bits(le64_to_cpu(entry->seg_addr)),
			 upper_32_bits(le64_to_cpu(entry->seg_addr)),
			 le32_to_cpu(entry->seg_size),
			 le32_to_cpu(entry->rsvd));
		addr += sizeof(*entry);
	}
}

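/*
 * The low dword of the CRCR register mixes the 64-byte-aligned command
 * ring dequeue pointer with the RCS/CS/CA/CRR flag bits, hence the
 * "low bits + flags" wording below.
 */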
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
{
	u64 val;

	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
			lower_32_bits(val));
	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
			upper_32_bits(val));
}

/* Print the last 32 bytes for 64-byte contexts */
static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
{
	int i;
	for (i = 0; i < 4; i++) {
		xhci_dbg(xhci, "@%p (virt) @%08llx "
			 "(dma) %#08llx - rsvd64[%d]\n",
			 &ctx[4 + i], (unsigned long long)dma,
			 ctx[4 + i], i);
		dma += 8;
	}
}

char *xhci_get_slot_state(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);

	switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
	case SLOT_STATE_ENABLED:
		return "enabled/disabled";
	case SLOT_STATE_DEFAULT:
		return "default";
	case SLOT_STATE_ADDRESSED:
		return "addressed";
	case SLOT_STATE_CONFIGURED:
		return "configured";
	default:
		return "reserved";
	}
}

static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	int i;

	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
	dma_addr_t dma = ctx->dma +
		((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	xhci_dbg(xhci, "Slot Context:\n");
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
			&slot_ctx->dev_info,
			(unsigned long long)dma, slot_ctx->dev_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
			&slot_ctx->dev_info2,
			(unsigned long long)dma, slot_ctx->dev_info2);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
			&slot_ctx->tt_info,
			(unsigned long long)dma, slot_ctx->tt_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
			&slot_ctx->dev_state,
			(unsigned long long)dma, slot_ctx->dev_state);
	dma += field_size;
	for (i = 0; i < 4; i++) {
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
				&slot_ctx->reserved[i], (unsigned long long)dma,
				slot_ctx->reserved[i], i);
		dma += field_size;
	}

	if (csz)
		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}

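/*
 * A device has at most 31 endpoint contexts (the bidirectional default
 * control endpoint plus 15 IN and 15 OUT endpoints), so last_ep is
 * capped at 31 below.
 */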
static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
		     struct xhci_container_ctx *ctx,
		     unsigned int last_ep)
{
	int i, j;
	int last_ep_ctx = 31;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (last_ep < 31)
		last_ep_ctx = last_ep + 1;
	for (i = 0; i < last_ep_ctx; i++) {
		unsigned int epaddr = xhci_get_endpoint_address(i);
		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
		dma_addr_t dma = ctx->dma +
			((unsigned long)ep_ctx - (unsigned long)ctx->bytes);

		xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
				usb_endpoint_out(epaddr) ? "OUT" : "IN",
				epaddr & USB_ENDPOINT_NUMBER_MASK, i);
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
				&ep_ctx->ep_info,
				(unsigned long long)dma, ep_ctx->ep_info);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
				&ep_ctx->ep_info2,
				(unsigned long long)dma, ep_ctx->ep_info2);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
				&ep_ctx->deq,
				(unsigned long long)dma, ep_ctx->deq);
		dma += 2*field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
				&ep_ctx->tx_info,
				(unsigned long long)dma, ep_ctx->tx_info);
		dma += field_size;
		for (j = 0; j < 3; j++) {
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
					&ep_ctx->reserved[j],
					(unsigned long long)dma,
					ep_ctx->reserved[j], j);
			dma += field_size;
		}

		if (csz)
			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
	}
}

void xhci_dbg_ctx(struct xhci_hcd *xhci,
		  struct xhci_container_ctx *ctx,
		  unsigned int last_ep)
{
	int i;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	dma_addr_t dma = ctx->dma;
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
		struct xhci_input_control_ctx *ctrl_ctx =
			xhci_get_input_control_ctx(ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "Could not get input context, bad type.\n");
			return;
		}

		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
			 ctrl_ctx->drop_flags);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
			 &ctrl_ctx->add_flags, (unsigned long long)dma,
			 ctrl_ctx->add_flags);
		dma += field_size;
		for (i = 0; i < 6; i++) {
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
				 ctrl_ctx->rsvd2[i], i);
			dma += field_size;
		}

		if (csz)
			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
	}

	xhci_dbg_slot_ctx(xhci, ctx);
	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}

void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);
	trace(&vaf);
	va_end(args);
}
EXPORT_SYMBOL_GPL(xhci_dbg_trace);
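
/*
 * Illustrative call site (not part of this file): xhci_dbg_trace() both
 * prints the formatted message via xhci_dbg() and hands the same
 * va_format to a tracepoint helper.  Callers elsewhere in the driver
 * pass one of the trace_xhci_dbg_* functions generated from
 * xhci-trace.h, roughly like:
 *
 *	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 *			"Allocated command ring at %p", xhci->cmd_ring);
 *
 * The exact message and trace helper above are examples only.
 */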