linux/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 *
 */

#include <linux/sort.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"

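/* Compress the input buffer into @dbg_buff in CUDBG_CHUNK_SIZE pieces and
 * update @pin_buff->size to the total number of compressed bytes written.
 */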
static int cudbg_do_compression(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *pin_buff,
                                struct cudbg_buffer *dbg_buff)
{
        struct cudbg_buffer temp_in_buff = { 0 };
        int bytes_left, bytes_read, bytes;
        u32 offset = dbg_buff->offset;
        int rc;

        temp_in_buff.offset = pin_buff->offset;
        temp_in_buff.data = pin_buff->data;
        temp_in_buff.size = pin_buff->size;

        bytes_left = pin_buff->size;
        bytes_read = 0;
        while (bytes_left > 0) {
                /* Do compression in smaller chunks */
                bytes = min_t(unsigned long, bytes_left,
                              (unsigned long)CUDBG_CHUNK_SIZE);
                temp_in_buff.data = (char *)pin_buff->data + bytes_read;
                temp_in_buff.size = bytes;
                rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
                if (rc)
                        return rc;
                bytes_left -= bytes;
                bytes_read += bytes;
        }

        pin_buff->size = dbg_buff->offset - offset;
        return 0;
}

static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
                                        struct cudbg_buffer *pin_buff,
                                        struct cudbg_buffer *dbg_buff)
{
        int rc = 0;

        if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
                cudbg_update_buff(pin_buff, dbg_buff);
        } else {
                rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
                if (rc)
                        goto out;
        }

out:
        cudbg_put_buff(pdbg_init, pin_buff);
        return rc;
}

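/* Return 1 if it is safe to talk to the firmware: the firmware must be
 * healthy (FW_OK) and backdoor register access (use_bd) must not be forced.
 */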
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
        struct adapter *padap = pdbg_init->adap;

        if (!(padap->flags & FW_OK) || padap->use_bd)
                return 0;

        return 1;
}

/* Pad the debug buffer with zero bytes so that the collected entity ends on
 * a 4-byte boundary, and record the padded size in the entity header.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
                              struct cudbg_entity_hdr *entity_hdr)
{
        u8 zero_buf[4] = {0};
        u8 padding, remain;

        remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
        padding = 4 - remain;
        if (remain) {
                memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
                       padding);
                dbg_buff->offset += padding;
                entity_hdr->num_pad = padding;
        }
        entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

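/* Entity indices are 1-based; the array of entity headers starts right
 * after the global cudbg header at the head of the output buffer.
 */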
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
        struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

        return (struct cudbg_entity_hdr *)
               ((char *)outbuf + cudbg_hdr->hdr_len +
                (sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

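/* Translate the physical EEPROM address to this PF's virtual address and
 * read @len bytes of VPD data into @dest.
 */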
static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
                              void *dest)
{
        int vaddr, rc;

        vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
        if (vaddr < 0)
                return vaddr;

        rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
        if (rc < 0)
                return rc;

        return 0;
}

static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
        return ((const struct cudbg_mem_desc *)a)->base -
               ((const struct cudbg_mem_desc *)b)->base;
}

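/* Snapshot the MA memory layout: which memories (EDC0/1, MC0/1, HMA) are
 * populated, the hardware regions carved out of them (both sorted by base
 * address), and the PM Tx/Rx page and free-list counters.
 */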
int cudbg_fill_meminfo(struct adapter *padap,
                       struct cudbg_meminfo *meminfo_buff)
{
        struct cudbg_mem_desc *md;
        u32 lo, hi, used, alloc;
        int n, i;

        memset(meminfo_buff->avail, 0,
               ARRAY_SIZE(meminfo_buff->avail) *
               sizeof(struct cudbg_mem_desc));
        memset(meminfo_buff->mem, 0,
               (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
        md = meminfo_buff->mem;

        for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
                meminfo_buff->mem[i].limit = 0;
                meminfo_buff->mem[i].idx = i;
        }

        /* Find and sort the populated memory ranges */
        i = 0;
        lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
        if (lo & EDRAM0_ENABLE_F) {
                hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
                meminfo_buff->avail[i].base =
                        cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
                meminfo_buff->avail[i].limit =
                        meminfo_buff->avail[i].base +
                        cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
                meminfo_buff->avail[i].idx = 0;
                i++;
        }

        if (lo & EDRAM1_ENABLE_F) {
                hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
                meminfo_buff->avail[i].base =
                        cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
                meminfo_buff->avail[i].limit =
                        meminfo_buff->avail[i].base +
                        cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
                meminfo_buff->avail[i].idx = 1;
                i++;
        }

        if (is_t5(padap->params.chip)) {
                if (lo & EXT_MEM0_ENABLE_F) {
                        hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
                        meminfo_buff->avail[i].base =
                                cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
                        meminfo_buff->avail[i].idx = 3;
                        i++;
                }

                if (lo & EXT_MEM1_ENABLE_F) {
                        hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
                        meminfo_buff->avail[i].base =
                                cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
                        meminfo_buff->avail[i].idx = 4;
                        i++;
                }
        } else {
                if (lo & EXT_MEM_ENABLE_F) {
                        hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
                        meminfo_buff->avail[i].base =
                                cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
                        meminfo_buff->avail[i].idx = 2;
                        i++;
                }

                if (lo & HMA_MUX_F) {
                        hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
                        meminfo_buff->avail[i].base =
                                cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
                        meminfo_buff->avail[i].idx = 5;
                        i++;
                }
        }

        if (!i) /* no memory available */
                return CUDBG_STATUS_ENTITY_NOT_FOUND;

        meminfo_buff->avail_c = i;
        sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
             cudbg_mem_desc_cmp, NULL);
        (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
        (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
        (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

        /* the next few have explicit upper bounds */
        md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
                    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
        md++;

        md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
                    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
        md++;

        if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
                if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
                        hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
                        md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
                } else {
                        hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
                        md->base = t4_read_reg(padap,
                                               LE_DB_HASH_TBL_BASE_ADDR_A);
                }
                md->limit = 0;
        } else {
                md->base = 0;
                md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
        }
        md++;

#define ulp_region(reg) do { \
        md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
        (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

        ulp_region(RX_ISCSI);
        ulp_region(RX_TDDP);
        ulp_region(TX_TPT);
        ulp_region(RX_STAG);
        ulp_region(RX_RQ);
        ulp_region(RX_RQUDP);
        ulp_region(RX_PBL);
        ulp_region(TX_PBL);
#undef ulp_region
        md->base = 0;
        md->idx = ARRAY_SIZE(cudbg_region);
        if (!is_t4(padap->params.chip)) {
                u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
                u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
                u32 size = 0;

                if (is_t5(padap->params.chip)) {
                        if (sge_ctrl & VFIFO_ENABLE_F)
                                size = DBVFIFO_SIZE_G(fifo_size);
                } else {
                        size = T6_DBVFIFO_SIZE_G(fifo_size);
                }

                if (size) {
                        md->base = BASEADDR_G(t4_read_reg(padap,
                                                          SGE_DBVFIFO_BADDR_A));
                        md->limit = md->base + (size << 2) - 1;
                }
        }

        md++;

        md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
        md->limit = 0;
        md++;
        md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
        md->limit = 0;
        md++;

        md->base = padap->vres.ocq.start;
        if (padap->vres.ocq.size)
                md->limit = md->base + padap->vres.ocq.size - 1;
        else
                md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
        md++;

        /* add any address-space holes, there can be up to 3 */
        for (n = 0; n < i - 1; n++)
                if (meminfo_buff->avail[n].limit <
                    meminfo_buff->avail[n + 1].base)
                        (md++)->base = meminfo_buff->avail[n].limit;

        if (meminfo_buff->avail[n].limit)
                (md++)->base = meminfo_buff->avail[n].limit;

        n = md - meminfo_buff->mem;
        meminfo_buff->mem_c = n;

        sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
             cudbg_mem_desc_cmp, NULL);

        lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
        hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
        meminfo_buff->up_ram_lo = lo;
        meminfo_buff->up_ram_hi = hi;

        lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
        hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
        meminfo_buff->up_extmem2_lo = lo;
        meminfo_buff->up_extmem2_hi = hi;

        lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
        for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
                meminfo_buff->free_rx_cnt +=
                        FREERXPAGECOUNT_G(t4_read_reg(padap,
                                                      TP_FLM_FREE_RX_CNT_A));

        meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
        meminfo_buff->rx_pages_data[1] =
                t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
        meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

        lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
        hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
        for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
                meminfo_buff->free_tx_cnt +=
                        FREETXPAGECOUNT_G(t4_read_reg(padap,
                                                      TP_FLM_FREE_TX_CNT_A));

        meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
        meminfo_buff->tx_pages_data[1] =
                hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
        meminfo_buff->tx_pages_data[2] =
                hi >= (1 << 20) ? 'M' : 'K';
        meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

        meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
        meminfo_buff->p_structs_free_cnt =
                FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

        for (i = 0; i < 4; i++) {
                if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
                else
                        lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
                if (is_t5(padap->params.chip)) {
                        used = T5_USED_G(lo);
                        alloc = T5_ALLOC_G(lo);
                } else {
                        used = USED_G(lo);
                        alloc = ALLOC_G(lo);
                }
                meminfo_buff->port_used[i] = used;
                meminfo_buff->port_alloc[i] = alloc;
        }

        for (i = 0; i < padap->params.arch.nchan; i++) {
                if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
                else
                        lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
                if (is_t5(padap->params.chip)) {
                        used = T5_USED_G(lo);
                        alloc = T5_ALLOC_G(lo);
                } else {
                        used = USED_G(lo);
                        alloc = ALLOC_G(lo);
                }
                meminfo_buff->loopback_used[i] = used;
                meminfo_buff->loopback_alloc[i] = alloc;
        }

        return 0;
}

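/* Collect the full register dump.  The dump size depends on the chip:
 * T4 uses T4_REGMAP_SIZE, while T5 and T6 share T5_REGMAP_SIZE.
 */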
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        u32 buf_size = 0;
        int rc = 0;

        if (is_t4(padap->params.chip))
                buf_size = T4_REGMAP_SIZE;
        else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
                buf_size = T5_REGMAP_SIZE;

        rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
        if (rc)
                return rc;
        t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

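/* Collect the firmware device log.  The log lives in adapter memory and
 * is read out through memory window 0 under win0_lock.
 */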
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct devlog_params *dparams;
        int rc = 0;

        rc = t4_init_devlog_params(padap);
        if (rc < 0) {
                cudbg_err->sys_err = rc;
                return rc;
        }

        dparams = &padap->params.devlog;
        rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
        if (rc)
                return rc;

        /* Collect FW devlog */
        if (dparams->start != 0) {
                spin_lock(&padap->win0_lock);
                rc = t4_memory_rw(padap, padap->params.drv_memwin,
                                  dparams->memtype, dparams->start,
                                  dparams->size,
                                  (__be32 *)(char *)temp_buff.data,
                                  1);
                spin_unlock(&padap->win0_lock);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(pdbg_init, &temp_buff);
                        return rc;
                }
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

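/* Collect the CIM logic analyzer.  T6 dumps 10 u32s per LA entry while
 * earlier chips dump 8; the LA config register is prepended to the dump.
 */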
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
                         struct cudbg_buffer *dbg_buff,
                         struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int size, rc;
        u32 cfg = 0;

        if (is_t6(padap->params.chip)) {
                size = padap->params.cim_la_size / 10 + 1;
                size *= 10 * sizeof(u32);
        } else {
                size = padap->params.cim_la_size / 8;
                size *= 8 * sizeof(u32);
        }

        size += sizeof(cfg);
        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }

        memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
        rc = t4_cim_read_la(padap,
                            (u32 *)((char *)temp_buff.data + sizeof(cfg)),
                            NULL);
        if (rc < 0) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int size, rc;

        size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        t4_cim_read_ma_la(padap,
                          (u32 *)temp_buff.data,
                          (u32 *)((char *)temp_buff.data +
                                  5 * CIM_MALA_SIZE));
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_cim_qcfg *cim_qcfg_data;
        int rc;

        rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
                            &temp_buff);
        if (rc)
                return rc;

        cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
        cim_qcfg_data->chip = padap->params.chip;
        rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
                         ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }

        rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
                         ARRAY_SIZE(cim_qcfg_data->obq_wr),
                         cim_qcfg_data->obq_wr);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }

        t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
                         cim_qcfg_data->thres);
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err, int qid)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int no_of_read_words, rc = 0;
        u32 qsize;

        /* collect CIM IBQ */
        qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
        rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
        if (rc)
                return rc;

        /* t4_read_cim_ibq() returns the number of words read or a negative
         * error code.
         */
        no_of_read_words = t4_read_cim_ibq(padap, qid,
                                           (u32 *)temp_buff.data, qsize);
        /* A return value <= 0 indicates an error; 0 means nothing was read */
        if (no_of_read_words <= 0) {
                if (!no_of_read_words)
                        rc = CUDBG_SYSTEM_ERROR;
                else
                        rc = no_of_read_words;
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

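/* Per-queue CIM IBQ collectors.  The qid follows the hardware numbering:
 * 0 = TP0, 1 = TP1, 2 = ULP, 3 = SGE0, 4 = SGE1, 5 = NCSI.
 */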
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

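/* Return the configured size of a CIM OBQ, in bytes: CIMQSIZE is scaled
 * by 64 to get the size in 32-bit words, then converted to bytes.
 */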
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
        u32 value;

        t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
                     QUENUMSELECT_V(qid));
        value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
        value = CIMQSIZE_G(value) * 64; /* size in number of words */
        return value * sizeof(u32);
}

static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err, int qid)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int no_of_read_words, rc = 0;
        u32 qsize;

        /* collect CIM OBQ */
        qsize = cudbg_cim_obq_size(padap, qid);
        rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
        if (rc)
                return rc;

        /* t4_read_cim_obq() returns the number of words read or a negative
         * error code.
         */
        no_of_read_words = t4_read_cim_obq(padap, qid,
                                           (u32 *)temp_buff.data, qsize);
        /* A return value <= 0 indicates an error; 0 means nothing was read */
        if (no_of_read_words <= 0) {
                if (!no_of_read_words)
                        rc = CUDBG_SYSTEM_ERROR;
                else
                        rc = no_of_read_words;
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

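/* Per-queue CIM OBQ collectors.  The qid follows the hardware numbering:
 * 0-3 = ULP0-ULP3, 4 = SGE, 5 = NCSI, and 6-7 = the SGE RX queues.
 */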
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

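/* Map a MEM_* memory type to its index in @mem_info->avail[] by matching
 * the avail entry's idx flag.  Returns CUDBG_STATUS_ENTITY_NOT_FOUND if
 * that memory is not populated on this adapter.
 */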
static int cudbg_meminfo_get_mem_index(struct adapter *padap,
                                       struct cudbg_meminfo *mem_info,
                                       u8 mem_type, u8 *idx)
{
        u8 i, flag;

        switch (mem_type) {
        case MEM_EDC0:
                flag = EDC0_FLAG;
                break;
        case MEM_EDC1:
                flag = EDC1_FLAG;
                break;
        case MEM_MC0:
                /* Some T5 cards have both MC0 and MC1. */
                flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
                break;
        case MEM_MC1:
                flag = MC1_FLAG;
                break;
        case MEM_HMA:
                flag = HMA_FLAG;
                break;
        default:
                return CUDBG_STATUS_ENTITY_NOT_FOUND;
        }

        for (i = 0; i < mem_info->avail_c; i++) {
                if (mem_info->avail[i].idx == flag) {
                        *idx = i;
                        return 0;
                }
        }

        return CUDBG_STATUS_ENTITY_NOT_FOUND;
}

/* Fetch the @region_name's start and end from @meminfo. */
static int cudbg_get_mem_region(struct adapter *padap,
                                struct cudbg_meminfo *meminfo,
                                u8 mem_type, const char *region_name,
                                struct cudbg_mem_desc *mem_desc)
{
        u8 mc, found = 0;
        u32 i, idx = 0;
        int rc;

        rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
        if (rc)
                return rc;

        for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
                if (!strcmp(cudbg_region[i], region_name)) {
                        found = 1;
                        idx = i;
                        break;
                }
        }
        if (!found)
                return -EINVAL;

        found = 0;
        for (i = 0; i < meminfo->mem_c; i++) {
                if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
                        continue; /* Skip holes */

                if (!(meminfo->mem[i].limit))
                        meminfo->mem[i].limit =
                                i < meminfo->mem_c - 1 ?
                                meminfo->mem[i + 1].base - 1 : ~0;

                if (meminfo->mem[i].idx == idx) {
                        /* Check if the region exists in @mem_type memory */
                        if (meminfo->mem[i].base < meminfo->avail[mc].base &&
                            meminfo->mem[i].limit < meminfo->avail[mc].base)
                                return -EINVAL;

                        if (meminfo->mem[i].base > meminfo->avail[mc].limit)
                                return -EINVAL;

                        memcpy(mem_desc, &meminfo->mem[i],
                               sizeof(struct cudbg_mem_desc));
                        found = 1;
                        break;
                }
        }
        if (!found)
                return -EINVAL;

        return 0;
}

/* Fetch and update the start and end of the requested memory region so that
 * they are relative to the base of the corresponding EDC/MC/HMA.
 */
static int cudbg_get_mem_relative(struct adapter *padap,
                                  struct cudbg_meminfo *meminfo,
                                  u8 mem_type, u32 *out_base, u32 *out_end)
{
        u8 mc_idx;
        int rc;

        rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
        if (rc)
                return rc;

        if (*out_base < meminfo->avail[mc_idx].base)
                *out_base = 0;
        else
                *out_base -= meminfo->avail[mc_idx].base;

        if (*out_end > meminfo->avail[mc_idx].limit)
                *out_end = meminfo->avail[mc_idx].limit;
        else
                *out_end -= meminfo->avail[mc_idx].base;

        return 0;
}

/* Get TX and RX Payload region */
static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
                                   const char *region_name,
                                   struct cudbg_region_info *payload)
{
        struct cudbg_mem_desc mem_desc = { 0 };
        struct cudbg_meminfo meminfo;
        int rc;

        rc = cudbg_fill_meminfo(padap, &meminfo);
        if (rc)
                return rc;

        rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
                                  &mem_desc);
        if (rc) {
                payload->exist = false;
                return 0;
        }

        payload->exist = true;
        payload->start = mem_desc.base;
        payload->end = mem_desc.limit;

        return cudbg_get_mem_relative(padap, &meminfo, mem_type,
                                      &payload->start, &payload->end);
}

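/* Read @len bytes at @addr from adapter memory of type @mtype through a
 * PCI-E memory window, sliding the window along as the transfer crosses
 * each aperture.  Reads are 64-bit wide where possible; the residual tail
 * is read in 32-bit words and then byte-wise.
 */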
static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
                             int mtype, u32 addr, u32 len, void *hbuf)
{
        u32 win_pf, memoffset, mem_aperture, mem_base;
        struct adapter *adap = pdbg_init->adap;
        u32 pos, offset, resid;
        u32 *res_buf;
        u64 *buf;
        int ret;

        /* Argument sanity checks ...
         */
        if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
                return -EINVAL;

        buf = (u64 *)hbuf;

        /* Try to do 64-bit reads.  Residual will be handled later. */
        resid = len & 0x7;
        len -= resid;

        ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
                                &mem_aperture);
        if (ret)
                return ret;

        addr = addr + memoffset;
        win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

        pos = addr & ~(mem_aperture - 1);
        offset = addr - pos;

        /* Set up initial PCI-E Memory Window to cover the start of our
         * transfer.
         */
        t4_memory_update_win(adap, win, pos | win_pf);

        /* Transfer data from the adapter */
        while (len > 0) {
                *buf++ = le64_to_cpu((__force __le64)
                                     t4_read_reg64(adap, mem_base + offset));
                offset += sizeof(u64);
                len -= sizeof(u64);

                /* If we've reached the end of our current window aperture,
                 * move the PCI-E Memory Window on to the next.
                 */
                if (offset == mem_aperture) {
                        pos += mem_aperture;
                        offset = 0;
                        t4_memory_update_win(adap, win, pos | win_pf);
                }
        }

        res_buf = (u32 *)buf;
        /* Read residual in 32-bit multiples */
        while (resid > sizeof(u32)) {
                *res_buf++ = le32_to_cpu((__force __le32)
                                         t4_read_reg(adap, mem_base + offset));
                offset += sizeof(u32);
                resid -= sizeof(u32);

                /* If we've reached the end of our current window aperture,
                 * move the PCI-E Memory Window on to the next.
                 */
                if (offset == mem_aperture) {
                        pos += mem_aperture;
                        offset = 0;
                        t4_memory_update_win(adap, win, pos | win_pf);
                }
        }

        /* Transfer residual < 32-bits */
        if (resid)
                t4_memory_rw_residual(adap, resid, mem_base + offset,
                                      (u8 *)res_buf, T4_MEMORY_READ);

        return 0;
}

#define CUDBG_YIELD_ITERATION 256

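/* Dump @tot_len bytes of the given memory into @dbg_buff in
 * CUDBG_CHUNK_SIZE pieces, skipping the Tx/Rx payload regions and yielding
 * the CPU every CUDBG_YIELD_ITERATION chunks.
 */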
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff, u8 mem_type,
                             unsigned long tot_len,
                             struct cudbg_error *cudbg_err)
{
        static const char * const region_name[] = { "Tx payload:",
                                                    "Rx payload:" };
        unsigned long bytes, bytes_left, bytes_read = 0;
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_region_info payload[2];
        u32 yield_count = 0;
        int rc = 0;
        u8 i;

        /* Get TX/RX Payload region range if they exist */
        memset(payload, 0, sizeof(payload));
        for (i = 0; i < ARRAY_SIZE(region_name); i++) {
                rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
                                             &payload[i]);
                if (rc)
                        return rc;

                if (payload[i].exist) {
                        /* Align start and end to avoid wrap around */
                        payload[i].start = roundup(payload[i].start,
                                                   CUDBG_CHUNK_SIZE);
                        payload[i].end = rounddown(payload[i].end,
                                                   CUDBG_CHUNK_SIZE);
                }
        }

        bytes_left = tot_len;
        while (bytes_left > 0) {
                /* MC is huge and is read through PIO access, so this loop
                 * can hold the CPU for a long time.  The OS may decide the
                 * task is hung and emit CPU stall traces, so yield the CPU
                 * regularly.
                 */
                yield_count++;
                if (!(yield_count % CUDBG_YIELD_ITERATION))
                        schedule();

                bytes = min_t(unsigned long, bytes_left,
                              (unsigned long)CUDBG_CHUNK_SIZE);
                rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
                if (rc)
                        return rc;

                for (i = 0; i < ARRAY_SIZE(payload); i++)
                        if (payload[i].exist &&
                            bytes_read >= payload[i].start &&
                            bytes_read + bytes <= payload[i].end)
                                /* TX and RX Payload regions can't overlap */
                                goto skip_read;

                spin_lock(&padap->win0_lock);
                rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
                                       bytes_read, bytes, temp_buff.data);
                spin_unlock(&padap->win0_lock);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(pdbg_init, &temp_buff);
                        return rc;
                }

skip_read:
                bytes_left -= bytes;
                bytes_read += bytes;
                rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
                                                  dbg_buff);
                if (rc) {
                        cudbg_put_buff(pdbg_init, &temp_buff);
                        return rc;
                }
        }
        return rc;
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
                             struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        int rc;

        if (is_fw_attached(pdbg_init)) {
                /* Flush uP dcache before reading edcX/mcX */
                rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
                if (rc)
                        cudbg_err->sys_warn = rc;
        }
}

static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
                                    struct cudbg_buffer *dbg_buff,
                                    struct cudbg_error *cudbg_err,
                                    u8 mem_type)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_meminfo mem_info;
        unsigned long size;
        u8 mc_idx;
        int rc;

        memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
        rc = cudbg_fill_meminfo(padap, &mem_info);
        if (rc)
                return rc;

        cudbg_t4_fwcache(pdbg_init, cudbg_err);
        rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
        if (rc)
                return rc;

        size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
        return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
                                 cudbg_err);
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
                                        MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
                                        MEM_EDC1);
}

int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
                                        MEM_MC0);
}

int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
                                        MEM_MC1);
}

int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
                                        MEM_HMA);
}

int cudbg_collect_rss(struct cudbg_init *pdbg_init,
                      struct cudbg_buffer *dbg_buff,
                      struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int rc, nentries;

        nentries = t4_chip_rss_size(padap);
        rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
                            &temp_buff);
        if (rc)
                return rc;

        rc = t4_read_rss(padap, (u16 *)temp_buff.data);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_rss_vf_conf *vfconf;
        int vf, rc, vf_count;

        vf_count = padap->params.arch.vfcount;
        rc = cudbg_get_buff(pdbg_init, dbg_buff,
                            vf_count * sizeof(struct cudbg_rss_vf_conf),
                            &temp_buff);
        if (rc)
                return rc;

        vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
        for (vf = 0; vf < vf_count; vf++)
                t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
                                      &vfconf[vf].rss_vf_vfh, true);
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int rc;

        rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
                            &temp_buff);
        if (rc)
                return rc;

        t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_pm_stats *pm_stats_buff;
        int rc;

        rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
                            &temp_buff);
        if (rc)
                return rc;

        pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
        t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
        t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_hw_sched *hw_sched_buff;
        int i, rc = 0;

        if (!padap->params.vpd.cclk)
                return CUDBG_STATUS_CCLK_NOT_DEFINED;

        rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
                            &temp_buff);
        if (rc)
                return rc;

        hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
        hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
        hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
        t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
        for (i = 0; i < NTX_SCHED; ++i)
                t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
                                &hw_sched_buff->ipg[i], true);
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

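/* Collect the TP indirect registers: the TP_PIO, TP_TM_PIO and
 * TP_MIB_INDEX register ranges, using the T5 or T6 index arrays depending
 * on the chip.
 */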
1241int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
1242                              struct cudbg_buffer *dbg_buff,
1243                              struct cudbg_error *cudbg_err)
1244{
1245        struct adapter *padap = pdbg_init->adap;
1246        struct cudbg_buffer temp_buff = { 0 };
1247        struct ireg_buf *ch_tp_pio;
1248        int i, rc, n = 0;
1249        u32 size;
1250
1251        if (is_t5(padap->params.chip))
1252                n = sizeof(t5_tp_pio_array) +
1253                    sizeof(t5_tp_tm_pio_array) +
1254                    sizeof(t5_tp_mib_index_array);
1255        else
1256                n = sizeof(t6_tp_pio_array) +
1257                    sizeof(t6_tp_tm_pio_array) +
1258                    sizeof(t6_tp_mib_index_array);
1259
1260        n = n / (IREG_NUM_ELEM * sizeof(u32));
1261        size = sizeof(struct ireg_buf) * n;
1262        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1263        if (rc)
1264                return rc;
1265
1266        ch_tp_pio = (struct ireg_buf *)temp_buff.data;
1267
1268        /* TP_PIO */
1269        if (is_t5(padap->params.chip))
1270                n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1271        else if (is_t6(padap->params.chip))
1272                n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1273
1274        for (i = 0; i < n; i++) {
1275                struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1276                u32 *buff = ch_tp_pio->outbuf;
1277
1278                if (is_t5(padap->params.chip)) {
1279                        tp_pio->ireg_addr = t5_tp_pio_array[i][0];
1280                        tp_pio->ireg_data = t5_tp_pio_array[i][1];
1281                        tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
1282                        tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
1283                } else if (is_t6(padap->params.chip)) {
1284                        tp_pio->ireg_addr = t6_tp_pio_array[i][0];
1285                        tp_pio->ireg_data = t6_tp_pio_array[i][1];
1286                        tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
1287                        tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
1288                }
1289                t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
1290                               tp_pio->ireg_local_offset, true);
1291                ch_tp_pio++;
1292        }
1293
1294        /* TP_TM_PIO */
1295        if (is_t5(padap->params.chip))
1296                n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1297        else if (is_t6(padap->params.chip))
1298                n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1299
1300        for (i = 0; i < n; i++) {
1301                struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1302                u32 *buff = ch_tp_pio->outbuf;
1303
1304                if (is_t5(padap->params.chip)) {
1305                        tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
1306                        tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
1307                        tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
1308                        tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
1309                } else if (is_t6(padap->params.chip)) {
1310                        tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
1311                        tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
1312                        tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
1313                        tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
1314                }
1315                t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
1316                                  tp_pio->ireg_local_offset, true);
1317                ch_tp_pio++;
1318        }
1319
1320        /* TP_MIB_INDEX */
1321        if (is_t5(padap->params.chip))
1322                n = sizeof(t5_tp_mib_index_array) /
1323                    (IREG_NUM_ELEM * sizeof(u32));
1324        else if (is_t6(padap->params.chip))
1325                n = sizeof(t6_tp_mib_index_array) /
1326                    (IREG_NUM_ELEM * sizeof(u32));
1327
1328        for (i = 0; i < n ; i++) {
1329                struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1330                u32 *buff = ch_tp_pio->outbuf;
1331
1332                if (is_t5(padap->params.chip)) {
1333                        tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
1334                        tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
1335                        tp_pio->ireg_local_offset =
1336                                t5_tp_mib_index_array[i][2];
1337                        tp_pio->ireg_offset_range =
1338                                t5_tp_mib_index_array[i][3];
1339                } else if (is_t6(padap->params.chip)) {
1340                        tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
1341                        tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
1342                        tp_pio->ireg_local_offset =
1343                                t6_tp_mib_index_array[i][2];
1344                        tp_pio->ireg_offset_range =
1345                                t6_tp_mib_index_array[i][3];
1346                }
1347                t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
1348                               tp_pio->ireg_local_offset, true);
1349                ch_tp_pio++;
1350        }
1351        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1352}
1353
1354static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
1355                                              struct sge_qbase_reg_field *qbase,
1356                                              u32 func, bool is_pf)
1357{
1358        u32 *buff, i;
1359
1360        if (is_pf) {
1361                buff = qbase->pf_data_value[func];
1362        } else {
1363                buff = qbase->vf_data_value[func];
1364                /* In SGE_QBASE_INDEX,
1365                 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->255.
1366                 */
1367                func += 8;
1368        }
1369
1370        t4_write_reg(padap, qbase->reg_addr, func);
1371        for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
1372                *buff = t4_read_reg(padap, qbase->reg_data[i]);
1373}
1374
1375int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
1376                               struct cudbg_buffer *dbg_buff,
1377                               struct cudbg_error *cudbg_err)
1378{
1379        struct adapter *padap = pdbg_init->adap;
1380        struct cudbg_buffer temp_buff = { 0 };
1381        struct sge_qbase_reg_field *sge_qbase;
1382        struct ireg_buf *ch_sge_dbg;
1383        int i, rc;
1384
1385        rc = cudbg_get_buff(pdbg_init, dbg_buff,
1386                            sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
1387                            &temp_buff);
1388        if (rc)
1389                return rc;
1390
1391        ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
1392        for (i = 0; i < 2; i++) {
1393                struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
1394                u32 *buff = ch_sge_dbg->outbuf;
1395
1396                sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
1397                sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
1398                sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
1399                sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
1400                t4_read_indirect(padap,
1401                                 sge_pio->ireg_addr,
1402                                 sge_pio->ireg_data,
1403                                 buff,
1404                                 sge_pio->ireg_offset_range,
1405                                 sge_pio->ireg_local_offset);
1406                ch_sge_dbg++;
1407        }
1408
1409        if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
1410                sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
1411                /* 1 addr reg SGE_QBASE_INDEX and 4 data regs
1412                 * SGE_QBASE_MAP[0-3]
1413                 */
1414                sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
1415                for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
1416                        sge_qbase->reg_data[i] =
1417                                t6_sge_qbase_index_array[i + 1];
1418
1419                for (i = 0; i <= PCIE_FW_MASTER_M; i++)
1420                        cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1421                                                          i, true);
1422
1423                for (i = 0; i < padap->params.arch.vfcount; i++)
1424                        cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1425                                                          i, false);
1426
1427                sge_qbase->vfcount = padap->params.arch.vfcount;
1428        }
1429
1430        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1431}
1432
1433int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
1434                           struct cudbg_buffer *dbg_buff,
1435                           struct cudbg_error *cudbg_err)
1436{
1437        struct adapter *padap = pdbg_init->adap;
1438        struct cudbg_buffer temp_buff = { 0 };
1439        struct cudbg_ulprx_la *ulprx_la_buff;
1440        int rc;
1441
1442        rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
1443                            &temp_buff);
1444        if (rc)
1445                return rc;
1446
1447        ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
1448        t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
1449        ulprx_la_buff->size = ULPRX_LA_SIZE;
1450        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1451}
1452
1453int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
1454                        struct cudbg_buffer *dbg_buff,
1455                        struct cudbg_error *cudbg_err)
1456{
1457        struct adapter *padap = pdbg_init->adap;
1458        struct cudbg_buffer temp_buff = { 0 };
1459        struct cudbg_tp_la *tp_la_buff;
1460        int size, rc;
1461
1462        size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
1463        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1464        if (rc)
1465                return rc;
1466
1467        tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
1468        tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
1469        t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1470        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1471}
1472
1473int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
1474                          struct cudbg_buffer *dbg_buff,
1475                          struct cudbg_error *cudbg_err)
1476{
1477        struct adapter *padap = pdbg_init->adap;
1478        struct cudbg_buffer temp_buff = { 0 };
1479        struct cudbg_meminfo *meminfo_buff;
1480        struct cudbg_ver_hdr *ver_hdr;
1481        int rc;
1482
1483        rc = cudbg_get_buff(pdbg_init, dbg_buff,
1484                            sizeof(struct cudbg_ver_hdr) +
1485                            sizeof(struct cudbg_meminfo),
1486                            &temp_buff);
1487        if (rc)
1488                return rc;
1489
1490        ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
1491        ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
1492        ver_hdr->revision = CUDBG_MEMINFO_REV;
1493        ver_hdr->size = sizeof(struct cudbg_meminfo);
1494
1495        meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
1496                                                sizeof(*ver_hdr));
1497        rc = cudbg_fill_meminfo(padap, meminfo_buff);
1498        if (rc) {
1499                cudbg_err->sys_err = rc;
1500                cudbg_put_buff(pdbg_init, &temp_buff);
1501                return rc;
1502        }
1503
1504        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1505}
1506
1507int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
1508                             struct cudbg_buffer *dbg_buff,
1509                             struct cudbg_error *cudbg_err)
1510{
1511        struct cudbg_cim_pif_la *cim_pif_la_buff;
1512        struct adapter *padap = pdbg_init->adap;
1513        struct cudbg_buffer temp_buff = { 0 };
1514        int size, rc;
1515
1516        size = sizeof(struct cudbg_cim_pif_la) +
1517               2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1518        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1519        if (rc)
1520                return rc;
1521
1522        cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
1523        cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1524        t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1525                           (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1526                           NULL, NULL);
1527        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1528}
1529
1530int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
1531                           struct cudbg_buffer *dbg_buff,
1532                           struct cudbg_error *cudbg_err)
1533{
1534        struct adapter *padap = pdbg_init->adap;
1535        struct cudbg_buffer temp_buff = { 0 };
1536        struct cudbg_clk_info *clk_info_buff;
1537        u64 tp_tick_us;
1538        int rc;
1539
1540        if (!padap->params.vpd.cclk)
1541                return CUDBG_STATUS_CCLK_NOT_DEFINED;
1542
1543        rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
1544                            &temp_buff);
1545        if (rc)
1546                return rc;
1547
1548        clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
1549        clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
1550        clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
1551        clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
1552        clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
1553        tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1554
1555        clk_info_buff->dack_timer =
1556                (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
1557                t4_read_reg(padap, TP_DACK_TIMER_A);
1558        clk_info_buff->retransmit_min =
1559                tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
1560        clk_info_buff->retransmit_max =
1561                tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
1562        clk_info_buff->persist_timer_min =
1563                tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
1564        clk_info_buff->persist_timer_max =
1565                tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
1566        clk_info_buff->keepalive_idle_timer =
1567                tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
1568        clk_info_buff->keepalive_interval =
1569                tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
1570        clk_info_buff->initial_srtt =
1571                tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
1572        clk_info_buff->finwait2_timer =
1573                tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
1574
1575        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1576}
1577
1578int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
1579                                struct cudbg_buffer *dbg_buff,
1580                                struct cudbg_error *cudbg_err)
1581{
1582        struct adapter *padap = pdbg_init->adap;
1583        struct cudbg_buffer temp_buff = { 0 };
1584        struct ireg_buf *ch_pcie;
1585        int i, rc, n;
1586        u32 size;
1587
1588        n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1589        size = sizeof(struct ireg_buf) * n * 2;
1590        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1591        if (rc)
1592                return rc;
1593
1594        ch_pcie = (struct ireg_buf *)temp_buff.data;
1595        /* PCIE_PDBG */
1596        for (i = 0; i < n; i++) {
1597                struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1598                u32 *buff = ch_pcie->outbuf;
1599
1600                pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
1601                pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
1602                pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
1603                pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
1604                t4_read_indirect(padap,
1605                                 pcie_pio->ireg_addr,
1606                                 pcie_pio->ireg_data,
1607                                 buff,
1608                                 pcie_pio->ireg_offset_range,
1609                                 pcie_pio->ireg_local_offset);
1610                ch_pcie++;
1611        }
1612
1613        /* PCIE_CDBG */
1614        n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1615        for (i = 0; i < n; i++) {
1616                struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1617                u32 *buff = ch_pcie->outbuf;
1618
1619                pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
1620                pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
1621                pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
1622                pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
1623                t4_read_indirect(padap,
1624                                 pcie_pio->ireg_addr,
1625                                 pcie_pio->ireg_data,
1626                                 buff,
1627                                 pcie_pio->ireg_offset_range,
1628                                 pcie_pio->ireg_local_offset);
1629                ch_pcie++;
1630        }
1631        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1632}
1633
1634int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
1635                              struct cudbg_buffer *dbg_buff,
1636                              struct cudbg_error *cudbg_err)
1637{
1638        struct adapter *padap = pdbg_init->adap;
1639        struct cudbg_buffer temp_buff = { 0 };
1640        struct ireg_buf *ch_pm;
1641        int i, rc, n;
1642        u32 size;
1643
1644        n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
1645        size = sizeof(struct ireg_buf) * n * 2;
1646        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1647        if (rc)
1648                return rc;
1649
1650        ch_pm = (struct ireg_buf *)temp_buff.data;
1651        /* PM_RX */
1652        for (i = 0; i < n; i++) {
1653                struct ireg_field *pm_pio = &ch_pm->tp_pio;
1654                u32 *buff = ch_pm->outbuf;
1655
1656                pm_pio->ireg_addr = t5_pm_rx_array[i][0];
1657                pm_pio->ireg_data = t5_pm_rx_array[i][1];
1658                pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
1659                pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
1660                t4_read_indirect(padap,
1661                                 pm_pio->ireg_addr,
1662                                 pm_pio->ireg_data,
1663                                 buff,
1664                                 pm_pio->ireg_offset_range,
1665                                 pm_pio->ireg_local_offset);
1666                ch_pm++;
1667        }
1668
1669        /* PM_TX */
1670        n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
1671        for (i = 0; i < n; i++) {
1672                struct ireg_field *pm_pio = &ch_pm->tp_pio;
1673                u32 *buff = ch_pm->outbuf;
1674
1675                pm_pio->ireg_addr = t5_pm_tx_array[i][0];
1676                pm_pio->ireg_data = t5_pm_tx_array[i][1];
1677                pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
1678                pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
1679                t4_read_indirect(padap,
1680                                 pm_pio->ireg_addr,
1681                                 pm_pio->ireg_data,
1682                                 buff,
1683                                 pm_pio->ireg_offset_range,
1684                                 pm_pio->ireg_local_offset);
1685                ch_pm++;
1686        }
1687        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1688}
1689
1690int cudbg_collect_tid(struct cudbg_init *pdbg_init,
1691                      struct cudbg_buffer *dbg_buff,
1692                      struct cudbg_error *cudbg_err)
1693{
1694        struct adapter *padap = pdbg_init->adap;
1695        struct cudbg_tid_info_region_rev1 *tid1;
1696        struct cudbg_buffer temp_buff = { 0 };
1697        struct cudbg_tid_info_region *tid;
1698        u32 para[2], val[2];
1699        int rc;
1700
1701        rc = cudbg_get_buff(pdbg_init, dbg_buff,
1702                            sizeof(struct cudbg_tid_info_region_rev1),
1703                            &temp_buff);
1704        if (rc)
1705                return rc;
1706
1707        tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
1708        tid = &tid1->tid;
1709        tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1710        tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
1711        tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
1712                             sizeof(struct cudbg_ver_hdr);
1713
1714        /* If firmware is not attached/alive, use backdoor register
1715         * access to collect dump.
1716         */
1717        if (!is_fw_attached(pdbg_init))
1718                goto fill_tid;
1719
1720#define FW_PARAM_PFVF_A(param) \
1721        (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
1722         FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
1723         FW_PARAMS_PARAM_Y_V(0) | \
1724         FW_PARAMS_PARAM_Z_V(0))
1725
1726        para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
1727        para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
1728        rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
1729        if (rc < 0) {
1730                cudbg_err->sys_err = rc;
1731                cudbg_put_buff(pdbg_init, &temp_buff);
1732                return rc;
1733        }
1734        tid->uotid_base = val[0];
1735        tid->nuotids = val[1] - val[0] + 1;
1736
1737        if (is_t5(padap->params.chip)) {
1738                tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
1739        } else if (is_t6(padap->params.chip)) {
1740                tid1->tid_start =
1741                        t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
1742                tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
1743
1744                para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
1745                para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
1746                rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
1747                                     para, val);
1748                if (rc < 0) {
1749                        cudbg_err->sys_err = rc;
1750                        cudbg_put_buff(pdbg_init, &temp_buff);
1751                        return rc;
1752                }
1753                tid->hpftid_base = val[0];
1754                tid->nhpftids = val[1] - val[0] + 1;
1755        }
1756
1757#undef FW_PARAM_PFVF_A
1758
1759fill_tid:
1760        tid->ntids = padap->tids.ntids;
1761        tid->nstids = padap->tids.nstids;
1762        tid->stid_base = padap->tids.stid_base;
1763        tid->hash_base = padap->tids.hash_base;
1764
1765        tid->natids = padap->tids.natids;
1766        tid->nftids = padap->tids.nftids;
1767        tid->ftid_base = padap->tids.ftid_base;
1768        tid->aftid_base = padap->tids.aftid_base;
1769        tid->aftid_end = padap->tids.aftid_end;
1770
1771        tid->sftid_base = padap->tids.sftid_base;
1772        tid->nsftids = padap->tids.nsftids;
1773
1774        tid->flags = padap->flags;
1775        tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
1776        tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
1777        tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
1778
1779        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1780}
1781
1782int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
1783                              struct cudbg_buffer *dbg_buff,
1784                              struct cudbg_error *cudbg_err)
1785{
1786        struct adapter *padap = pdbg_init->adap;
1787        struct cudbg_buffer temp_buff = { 0 };
1788        u32 size, *value, j;
1789        int i, rc, n;
1790
1791        size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
1792        n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
1793        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1794        if (rc)
1795                return rc;
1796
1797        value = (u32 *)temp_buff.data;
1798        for (i = 0; i < n; i++) {
1799                for (j = t5_pcie_config_array[i][0];
1800                     j <= t5_pcie_config_array[i][1]; j += 4) {
1801                        t4_hw_pci_read_cfg4(padap, j, value);
1802                        value++;
1803                }
1804        }
1805        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1806}
1807
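    /* Each SGE context image carries a "context valid" flag at a fixed
     * bit position: bit 176 for egress, bit 141 for ingress, and bit 89
     * for FLM contexts.  Returns non-zero if the flag is set in @buf.
     */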
1808static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
1809{
1810        int index, bit, bit_pos = 0;
1811
1812        switch (type) {
1813        case CTXT_EGRESS:
1814                bit_pos = 176;
1815                break;
1816        case CTXT_INGRESS:
1817                bit_pos = 141;
1818                break;
1819        case CTXT_FLM:
1820                bit_pos = 89;
1821                break;
1822        }
1823        index = bit_pos / 32;
1824        bit = bit_pos % 32;
1825        return buf[index] & (1U << bit);
1826}
1827
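    /* Locate the EGRESS and INGRESS context regions in adapter memory
     * and record their relative start/end offsets and memory type.  The
     * FLM (and hence CNM) region size is instead derived from the
     * freelist configuration in SGE_FLM_CFG.
     */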
1828static int cudbg_get_ctxt_region_info(struct adapter *padap,
1829                                      struct cudbg_region_info *ctx_info,
1830                                      u8 *mem_type)
1831{
1832        struct cudbg_mem_desc mem_desc;
1833        struct cudbg_meminfo meminfo;
1834        u32 i, j, value, found;
1835        u8 flq;
1836        int rc;
1837
1838        rc = cudbg_fill_meminfo(padap, &meminfo);
1839        if (rc)
1840                return rc;
1841
1842        /* Get EGRESS and INGRESS context region size */
1843        for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
1844                found = 0;
1845                memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
1846                for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
1847                        rc = cudbg_get_mem_region(padap, &meminfo, j,
1848                                                  cudbg_region[i],
1849                                                  &mem_desc);
1850                        if (!rc) {
1851                                found = 1;
1852                                rc = cudbg_get_mem_relative(padap, &meminfo, j,
1853                                                            &mem_desc.base,
1854                                                            &mem_desc.limit);
1855                                if (rc) {
1856                                        ctx_info[i].exist = false;
1857                                        break;
1858                                }
1859                                ctx_info[i].exist = true;
1860                                ctx_info[i].start = mem_desc.base;
1861                                ctx_info[i].end = mem_desc.limit;
1862                                mem_type[i] = j;
1863                                break;
1864                        }
1865                }
1866                if (!found)
1867                        ctx_info[i].exist = false;
1868        }
1869
1870        /* Get FLM and CNM max qid. */
1871        value = t4_read_reg(padap, SGE_FLM_CFG_A);
1872
1873        /* Get number of data freelist queues */
1874        flq = HDRSTARTFLQ_G(value);
1875        ctx_info[CTXT_FLM].exist = true;
1876        ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
1877
1878        /* The number of CONM contexts is the same as the number of
1879         * freelist queues.
1880         */
1881        ctx_info[CTXT_CNM].exist = true;
1882        ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
1883
1884        return 0;
1885}
1886
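    /* Return an upper bound, in bytes, on the space needed to dump all
     * SGE contexts.  Regions that could not be located fall back to
     * CUDBG_LOWMEM_MAX_CTXT_QIDS entries each.
     */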
1887int cudbg_dump_context_size(struct adapter *padap)
1888{
1889        struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1890        u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1891        u32 i, size = 0;
1892        int rc;
1893
1894        /* Get max valid qid for each type of queue */
1895        rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1896        if (rc)
1897                return rc;
1898
1899        for (i = 0; i < CTXT_CNM; i++) {
1900                if (!region_info[i].exist) {
1901                        if (i == CTXT_EGRESS || i == CTXT_INGRESS)
1902                                size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
1903                                        SGE_CTXT_SIZE;
1904                        continue;
1905                }
1906
1907                size += (region_info[i].end - region_info[i].start + 1) /
1908                        SGE_CTXT_SIZE;
1909        }
1910        return size * sizeof(struct cudbg_ch_cntxt);
1911}
1912
1913static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
1914                                enum ctxt_type ctype, u32 *data)
1915{
1916        struct adapter *padap = pdbg_init->adap;
1917        int rc = -1;
1918
1919        /* Under heavy traffic, the SGE Queue context registers are
1920         * accessed frequently by firmware.
1921         *
1922         * To avoid conflicts with firmware, always ask firmware to fetch
1923         * the SGE Queue contexts via mailbox. On failure, fall back to
1924         * accessing the hardware registers directly.
1925         */
1926        if (is_fw_attached(pdbg_init))
1927                rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
1928        if (rc)
1929                t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
1930}
1931
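    /* Read contexts [0, max_qid) one at a time via firmware (or the
     * backdoor) and append only the valid ones to *out_buff.  For FLM
     * contexts, the paired CNM context is fetched alongside.  The
     * caller's write pointer is advanced past everything stored.
     */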
1932static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
1933                                  u8 ctxt_type,
1934                                  struct cudbg_ch_cntxt **out_buff)
1935{
1936        struct cudbg_ch_cntxt *buff = *out_buff;
1937        int rc;
1938        u32 j;
1939
1940        for (j = 0; j < max_qid; j++) {
1941                cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
1942                rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
1943                if (!rc)
1944                        continue;
1945
1946                buff->cntxt_type = ctxt_type;
1947                buff->cntxt_id = j;
1948                buff++;
1949                if (ctxt_type == CTXT_FLM) {
1950                        cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
1951                        buff->cntxt_type = CTXT_CNM;
1952                        buff->cntxt_id = j;
1953                        buff++;
1954                }
1955        }
1956
1957        *out_buff = buff;
1958}
1959
1960int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
1961                               struct cudbg_buffer *dbg_buff,
1962                               struct cudbg_error *cudbg_err)
1963{
1964        struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1965        struct adapter *padap = pdbg_init->adap;
1966        u32 j, size, max_ctx_size, max_ctx_qid;
1967        u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1968        struct cudbg_buffer temp_buff = { 0 };
1969        struct cudbg_ch_cntxt *buff;
1970        u64 *dst_off, *src_off;
1971        u8 *ctx_buf;
1972        u8 i, k;
1973        int rc;
1974
1975        /* Get max valid qid for each type of queue */
1976        rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1977        if (rc)
1978                return rc;
1979
1980        rc = cudbg_dump_context_size(padap);
1981        if (rc <= 0)
1982                return CUDBG_STATUS_ENTITY_NOT_FOUND;
1983
1984        size = rc;
1985        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1986        if (rc)
1987                return rc;
1988
1989        /* Get buffer with enough space to read the biggest context
1990         * region in memory.
1991         */
1992        max_ctx_size = max(region_info[CTXT_EGRESS].end -
1993                           region_info[CTXT_EGRESS].start + 1,
1994                           region_info[CTXT_INGRESS].end -
1995                           region_info[CTXT_INGRESS].start + 1);
1996
1997        ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
1998        if (!ctx_buf) {
1999                cudbg_put_buff(pdbg_init, &temp_buff);
2000                return -ENOMEM;
2001        }
2002
2003        buff = (struct cudbg_ch_cntxt *)temp_buff.data;
2004
2005        /* Collect EGRESS and INGRESS context data.
2006         * In case of failure, fall back to collecting via FW or
2007         * backdoor access.
2008         */
2009        for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
2010                if (!region_info[i].exist) {
2011                        max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2012                        cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2013                                              &buff);
2014                        continue;
2015                }
2016
2017                max_ctx_size = region_info[i].end - region_info[i].start + 1;
2018                max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2019
2020                /* If firmware is not attached/alive, use backdoor register
2021                 * access to collect dump.
2022                 */
2023                if (is_fw_attached(pdbg_init)) {
2024                        t4_sge_ctxt_flush(padap, padap->mbox, i);
2025
2026                        rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
2027                                          region_info[i].start, max_ctx_size,
2028                                          (__be32 *)ctx_buf, 1);
2029                }
2030
2031                if (rc || !is_fw_attached(pdbg_init)) {
2032                        max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2033                        cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2034                                              &buff);
2035                        continue;
2036                }
2037
2038                for (j = 0; j < max_ctx_qid; j++) {
2039                        src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
2040                        dst_off = (u64 *)buff->data;
2041
2042                        /* The data is stored in 64-bit CPU order.  Convert it
2043                         * to big endian before parsing.
2044                         */
2045                        for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
2046                                dst_off[k] = cpu_to_be64(src_off[k]);
2047
2048                        rc = cudbg_sge_ctxt_check_valid(buff->data, i);
2049                        if (!rc)
2050                                continue;
2051
2052                        buff->cntxt_type = i;
2053                        buff->cntxt_id = j;
2054                        buff++;
2055                }
2056        }
2057
2058        kvfree(ctx_buf);
2059
2060        /* Collect FREELIST and CONGESTION MANAGER contexts */
2061        max_ctx_size = region_info[CTXT_FLM].end -
2062                       region_info[CTXT_FLM].start + 1;
2063        max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2064        /* Since FLM and CONM are 1-to-1 mapped, the function below
2065         * fetches both FLM and CONM contexts.
2066         */
2067        cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
2068
2069        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2070}
2071
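    /* Convert a TCAM (x, y) pair into an address/mask pair: the mask is
     * the OR of x and y, and the 48-bit DMAC address is taken from the
     * low bytes of y after conversion to big endian.
     */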
2072static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
2073{
2074        *mask = x | y;
2075        y = (__force u64)cpu_to_be64(y);
2076        memcpy(addr, (char *)&y + 2, ETH_ALEN);
2077}
2078
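    /* Build the MPS replication vector by reading the MPS_VF_RPLCT_MAP
     * registers directly, for use when the firmware mailbox is not
     * available.  T5 has only four map registers (MAP0-3); T6 has
     * eight (MAP0-7).
     */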
2079static void cudbg_mps_rpl_backdoor(struct adapter *padap,
2080                                   struct fw_ldst_mps_rplc *mps_rplc)
2081{
2082        if (is_t5(padap->params.chip)) {
2083                mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2084                                                          MPS_VF_RPLCT_MAP3_A));
2085                mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2086                                                          MPS_VF_RPLCT_MAP2_A));
2087                mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2088                                                          MPS_VF_RPLCT_MAP1_A));
2089                mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2090                                                          MPS_VF_RPLCT_MAP0_A));
2091        } else {
2092                mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2093                                                          MPS_VF_RPLCT_MAP7_A));
2094                mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2095                                                          MPS_VF_RPLCT_MAP6_A));
2096                mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2097                                                          MPS_VF_RPLCT_MAP5_A));
2098                mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2099                                                          MPS_VF_RPLCT_MAP4_A));
2100        }
2101        mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
2102        mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
2103        mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
2104        mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
2105}
2106
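    /* Dump one MPS TCAM entry at @idx.  T6 and later read tcamx/tcamy
     * through the MPS_CLS_TCAM_DATA2_CTL indirection and decode the
     * lookup type and VNI fields; earlier chips read the 64-bit TCAM
     * registers directly.  Valid entries also pull in the classifier
     * SRAM word and, if replication is enabled, the replication map.
     */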
2107static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
2108                                    struct cudbg_mps_tcam *tcam, u32 idx)
2109{
2110        struct adapter *padap = pdbg_init->adap;
2111        u64 tcamy, tcamx, val;
2112        u32 ctl, data2;
2113        int rc = 0;
2114
2115        if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
2116                /* CtlReqID   - 1: use Host Driver Requester ID
2117                 * CtlCmdType - 0: Read, 1: Write
2118                 * CtlTcamSel - 0: TCAM0, 1: TCAM1
2119                 * CtlXYBitSel- 0: Y bit, 1: X bit
2120                 */
2121
2122                /* Read tcamy */
2123                ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
2124                if (idx < 256)
2125                        ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
2126                else
2127                        ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
2128
2129                t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2130                val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2131                tcamy = DMACH_G(val) << 32;
2132                tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2133                data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2134                tcam->lookup_type = DATALKPTYPE_G(data2);
2135
2136                /* 0 - Outer header, 1 - Inner header
2137                 * [71:48] bit locations are overloaded for
2138                 * outer vs. inner lookup types.
2139                 */
2140                if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2141                        /* Inner header VNI */
2142                        tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2143                        tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
2144                        tcam->dip_hit = data2 & DATADIPHIT_F;
2145                } else {
2146                        tcam->vlan_vld = data2 & DATAVIDH2_F;
2147                        tcam->ivlan = VIDL_G(val);
2148                }
2149
2150                tcam->port_num = DATAPORTNUM_G(data2);
2151
2152                /* Read tcamx. Change the control param */
2153                ctl |= CTLXYBITSEL_V(1);
2154                t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2155                val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2156                tcamx = DMACH_G(val) << 32;
2157                tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2158                data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2159                if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2160                        /* Inner header VNI mask */
2161                        tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2162                        tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
2163                }
2164        } else {
2165                tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
2166                tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
2167        }
2168
2169        /* If no entry, return */
2170        if (tcamx & tcamy)
2171                return rc;
2172
2173        tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
2174        tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
2175
2176        if (is_t5(padap->params.chip))
2177                tcam->repli = (tcam->cls_lo & REPLICATE_F);
2178        else if (is_t6(padap->params.chip))
2179                tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
2180
2181        if (tcam->repli) {
2182                struct fw_ldst_cmd ldst_cmd;
2183                struct fw_ldst_mps_rplc mps_rplc;
2184
2185                memset(&ldst_cmd, 0, sizeof(ldst_cmd));
2186                ldst_cmd.op_to_addrspace =
2187                        htonl(FW_CMD_OP_V(FW_LDST_CMD) |
2188                              FW_CMD_REQUEST_F | FW_CMD_READ_F |
2189                              FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
2190                ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
2191                ldst_cmd.u.mps.rplc.fid_idx =
2192                        htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
2193                              FW_LDST_CMD_IDX_V(idx));
2194
2195                /* If firmware is not attached/alive, use backdoor register
2196                 * access to collect dump.
2197                 */
2198                if (is_fw_attached(pdbg_init))
2199                        rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
2200                                        sizeof(ldst_cmd), &ldst_cmd);
2201
2202                if (rc || !is_fw_attached(pdbg_init)) {
2203                        cudbg_mps_rpl_backdoor(padap, &mps_rplc);
2204                        /* Ignore the error since we collected the data
2205                         * directly by reading registers.
2206                         */
2207                        rc = 0;
2208                } else {
2209                        mps_rplc = ldst_cmd.u.mps.rplc;
2210                }
2211
2212                tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
2213                tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
2214                tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
2215                tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
2216                if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
2217                        tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
2218                        tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
2219                        tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
2220                        tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
2221                }
2222        }
2223        cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
2224        tcam->idx = idx;
2225        tcam->rplc_size = padap->params.arch.mps_rplc_size;
2226        return rc;
2227}
2228
2229int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
2230                           struct cudbg_buffer *dbg_buff,
2231                           struct cudbg_error *cudbg_err)
2232{
2233        struct adapter *padap = pdbg_init->adap;
2234        struct cudbg_buffer temp_buff = { 0 };
2235        u32 size = 0, i, n, total_size = 0;
2236        struct cudbg_mps_tcam *tcam;
2237        int rc;
2238
2239        n = padap->params.arch.mps_tcam_size;
2240        size = sizeof(struct cudbg_mps_tcam) * n;
2241        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2242        if (rc)
2243                return rc;
2244
2245        tcam = (struct cudbg_mps_tcam *)temp_buff.data;
2246        for (i = 0; i < n; i++) {
2247                rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
2248                if (rc) {
2249                        cudbg_err->sys_err = rc;
2250                        cudbg_put_buff(pdbg_init, &temp_buff);
2251                        return rc;
2252                }
2253                total_size += sizeof(struct cudbg_mps_tcam);
2254                tcam++;
2255        }
2256
2257        if (!total_size) {
2258                rc = CUDBG_SYSTEM_ERROR;
2259                cudbg_err->sys_err = rc;
2260                cudbg_put_buff(pdbg_init, &temp_buff);
2261                return rc;
2262        }
2263        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2264}
2265
2266int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
2267                           struct cudbg_buffer *dbg_buff,
2268                           struct cudbg_error *cudbg_err)
2269{
2270        struct adapter *padap = pdbg_init->adap;
2271        struct cudbg_buffer temp_buff = { 0 };
2272        char vpd_str[CUDBG_VPD_VER_LEN + 1];
2273        u32 scfg_vers, vpd_vers, fw_vers;
2274        struct cudbg_vpd_data *vpd_data;
2275        struct vpd_params vpd = { 0 };
2276        int rc, ret;
2277
2278        rc = t4_get_raw_vpd_params(padap, &vpd);
2279        if (rc)
2280                return rc;
2281
2282        rc = t4_get_fw_version(padap, &fw_vers);
2283        if (rc)
2284                return rc;
2285
2286        /* Serial Configuration Version is located beyond the PF's vpd size.
2287         * Temporarily give access to the entire EEPROM to get it.
2288         */
2289        rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
2290        if (rc < 0)
2291                return rc;
2292
2293        ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
2294                                 &scfg_vers);
2295
2296        /* Restore the original PF's vpd size */
2297        rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
2298        if (rc < 0)
2299                return rc;
2300
2301        if (ret)
2302                return ret;
2303
2304        rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
2305                                vpd_str);
2306        if (rc)
2307                return rc;
2308
2309        vpd_str[CUDBG_VPD_VER_LEN] = '\0';
2310        rc = kstrtouint(vpd_str, 0, &vpd_vers);
2311        if (rc)
2312                return rc;
2313
2314        rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
2315                            &temp_buff);
2316        if (rc)
2317                return rc;
2318
2319        vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
2320        memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
2321        memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
2322        memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
2323        memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
2324        vpd_data->scfg_vers = scfg_vers;
2325        vpd_data->vpd_vers = vpd_vers;
2326        vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
2327        vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
2328        vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
2329        vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
2330        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2331}
2332
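    /* Issue a DBGI read for @tid through the LE debug interface: zero
     * the request data registers, write and start the command, poll for
     * completion, then copy the response words into @tid_data.
     */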
2333static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
2334                          struct cudbg_tid_data *tid_data)
2335{
2336        struct adapter *padap = pdbg_init->adap;
2337        int i, cmd_retry = 8;
2338        u32 val;
2339
2340        /* Fill REQ_DATA regs with 0's */
2341        for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
2342                t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
2343
2344        /* Write DBGI command */
2345        val = DBGICMD_V(4) | DBGITID_V(tid);
2346        t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
2347        tid_data->dbig_cmd = val;
2348
2349        val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
2350        t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
2351        tid_data->dbig_conf = val;
2352
2353        /* Poll the DBGICMDBUSY bit */
2354        val = 1;
2355        while (val) {
2356                val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
2357                val = val & DBGICMDBUSY_F;
2358                cmd_retry--;
2359                if (!cmd_retry)
2360                        return CUDBG_SYSTEM_ERROR;
2361        }
2362
2363        /* Check RESP status */
2364        val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
2365        tid_data->dbig_rsp_stat = val;
2366        if (!(val & 1))
2367                return CUDBG_SYSTEM_ERROR;
2368
2369        /* Read RESP data */
2370        for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
2371                tid_data->data[i] = t4_read_reg(padap,
2372                                                LE_DB_DBGI_RSP_DATA_A +
2373                                                (i << 2));
2374        tid_data->tid = tid;
2375        return 0;
2376}
2377
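    /* Classify @tid by comparing it against the LE TCAM region start
     * indices gathered by cudbg_fill_le_tcam_info().
     */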
2378static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
2379{
2380        int type = LE_ET_UNKNOWN;
2381
2382        if (tid < tcam_region.server_start)
2383                type = LE_ET_TCAM_CON;
2384        else if (tid < tcam_region.filter_start)
2385                type = LE_ET_TCAM_SERVER;
2386        else if (tid < tcam_region.clip_start)
2387                type = LE_ET_TCAM_FILTER;
2388        else if (tid < tcam_region.routing_start)
2389                type = LE_ET_TCAM_CLIP;
2390        else if (tid < tcam_region.tid_hash_base)
2391                type = LE_ET_TCAM_ROUTING;
2392        else if (tid < tcam_region.max_tid)
2393                type = LE_ET_HASH_CON;
2394        else
2395                type = LE_ET_INVALID_TID;
2396
2397        return type;
2398}
2399
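    /* Best-effort check for an IPv6 entry.  IPv6 entries span multiple
     * consecutive tids, so only even tids can start one; bit 15 of
     * response word 16 indicates IPv6 for hash and TCAM connection
     * entries, with TCAM entries additionally matching word 9.
     */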
2400static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
2401                               struct cudbg_tcam tcam_region)
2402{
2403        int ipv6 = 0;
2404        int le_type;
2405
2406        le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
2407        if (tid_data->tid & 1)
2408                return 0;
2409
2410        if (le_type == LE_ET_HASH_CON) {
2411                ipv6 = tid_data->data[16] & 0x8000;
2412        } else if (le_type == LE_ET_TCAM_CON) {
2413                ipv6 = tid_data->data[16] & 0x8000;
2414                if (ipv6)
2415                        ipv6 = tid_data->data[9] == 0x00C00000;
2416        } else {
2417                ipv6 = 0;
2418        }
2419        return ipv6;
2420}
2421
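    /* Populate @tcam_region with the LE region start indices (server,
     * filter, CLIP, routing, hash base) and compute max_tid from the
     * hash configuration.
     */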
2422void cudbg_fill_le_tcam_info(struct adapter *padap,
2423                             struct cudbg_tcam *tcam_region)
2424{
2425        u32 value;
2426
2427        /* Get the LE regions */
2428        value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
2429        tcam_region->tid_hash_base = value;
2430
2431        /* Get routing table index */
2432        value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
2433        tcam_region->routing_start = value;
2434
2435        /* Get clip table index. For T6 there is a separate CLIP TCAM */
2436        if (is_t6(padap->params.chip))
2437                value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
2438        else
2439                value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
2440        tcam_region->clip_start = value;
2441
2442        /* Get filter table index */
2443        value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
2444        tcam_region->filter_start = value;
2445
2446        /* Get server table index */
2447        value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
2448        tcam_region->server_start = value;
2449
2450        /* Check whether hash is enabled and calculate the max tids */
2451        value = t4_read_reg(padap, LE_DB_CONFIG_A);
2452        if ((value >> HASHEN_S) & 1) {
2453                value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
2454                if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
2455                        tcam_region->max_tid = (value & 0xFFFFF) +
2456                                               tcam_region->tid_hash_base;
2457                } else {
2458                        value = HASHTIDSIZE_G(value);
2459                        value = 1 << value;
2460                        tcam_region->max_tid = value +
2461                                               tcam_region->tid_hash_base;
2462                }
2463        } else { /* hash not enabled */
2464                if (is_t6(padap->params.chip))
2465                        tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
2466                                               CUDBG_MAX_TID_COMP_EN :
2467                                               CUDBG_MAX_TID_COMP_DIS;
2468                else
2469                        tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
2470        }
2471
2472        if (is_t6(padap->params.chip))
2473                tcam_region->max_tid += CUDBG_T6_CLIP;
2474}
2475
2476int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
2477                          struct cudbg_buffer *dbg_buff,
2478                          struct cudbg_error *cudbg_err)
2479{
2480        struct adapter *padap = pdbg_init->adap;
2481        struct cudbg_buffer temp_buff = { 0 };
2482        struct cudbg_tcam tcam_region = { 0 };
2483        struct cudbg_tid_data *tid_data;
2484        u32 bytes = 0;
2485        int rc, size;
2486        u32 i;
2487
2488        cudbg_fill_le_tcam_info(padap, &tcam_region);
2489
2490        size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
2491        size += sizeof(struct cudbg_tcam);
2492        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2493        if (rc)
2494                return rc;
2495
2496        memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
2497        bytes = sizeof(struct cudbg_tcam);
2498        tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
2499        /* Read all tids */
2500        for (i = 0; i < tcam_region.max_tid; ) {
2501                rc = cudbg_read_tid(pdbg_init, i, tid_data);
2502                if (rc) {
2503                        cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
2504                        /* Update tcam header and exit */
2505                        tcam_region.max_tid = i;
2506                        memcpy(temp_buff.data, &tcam_region,
2507                               sizeof(struct cudbg_tcam));
2508                        goto out;
2509                }
2510
2511                if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
2512                        /* T6 CLIP TCAM: ipv6 takes 4 entries */
2513                        if (is_t6(padap->params.chip) &&
2514                            i >= tcam_region.clip_start &&
2515                            i < tcam_region.clip_start + CUDBG_T6_CLIP)
2516                                i += 4;
2517                        else /* Main TCAM: ipv6 takes two tids */
2518                                i += 2;
2519                } else {
2520                        i++;
2521                }
2522
2523                tid_data++;
2524                bytes += sizeof(struct cudbg_tid_data);
2525        }
2526
2527out:
2528        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2529}
2530
2531int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
2532                        struct cudbg_buffer *dbg_buff,
2533                        struct cudbg_error *cudbg_err)
2534{
2535        struct adapter *padap = pdbg_init->adap;
2536        struct cudbg_buffer temp_buff = { 0 };
2537        u32 size;
2538        int rc;
2539
2540        size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2541        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2542        if (rc)
2543                return rc;
2544
2545        t4_read_cong_tbl(padap, (void *)temp_buff.data);
2546        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2547}
2548
2549int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
2550                              struct cudbg_buffer *dbg_buff,
2551                              struct cudbg_error *cudbg_err)
2552{
2553        struct adapter *padap = pdbg_init->adap;
2554        struct cudbg_buffer temp_buff = { 0 };
2555        struct ireg_buf *ma_indr;
2556        int i, rc, n;
2557        u32 size, j;
2558
2559        if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2560                return CUDBG_STATUS_ENTITY_NOT_FOUND;
2561
2562        n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2563        size = sizeof(struct ireg_buf) * n * 2;
2564        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2565        if (rc)
2566                return rc;
2567
2568        ma_indr = (struct ireg_buf *)temp_buff.data;
2569        for (i = 0; i < n; i++) {
2570                struct ireg_field *ma_fli = &ma_indr->tp_pio;
2571                u32 *buff = ma_indr->outbuf;
2572
2573                ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
2574                ma_fli->ireg_data = t6_ma_ireg_array[i][1];
2575                ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
2576                ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
2577                t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
2578                                 buff, ma_fli->ireg_offset_range,
2579                                 ma_fli->ireg_local_offset);
2580                ma_indr++;
2581        }
2582
2583        n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
2584        for (i = 0; i < n; i++) {
2585                struct ireg_field *ma_fli = &ma_indr->tp_pio;
2586                u32 *buff = ma_indr->outbuf;
2587
2588                ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
2589                ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
2590                ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
2591                for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
2592                        t4_read_indirect(padap, ma_fli->ireg_addr,
2593                                         ma_fli->ireg_data, buff, 1,
2594                                         ma_fli->ireg_local_offset);
2595                        buff++;
2596                        ma_fli->ireg_local_offset += 0x20;
2597                }
2598                ma_indr++;
2599        }
2600        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2601}
2602
2603int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
2604                           struct cudbg_buffer *dbg_buff,
2605                           struct cudbg_error *cudbg_err)
2606{
2607        struct adapter *padap = pdbg_init->adap;
2608        struct cudbg_buffer temp_buff = { 0 };
2609        struct cudbg_ulptx_la *ulptx_la_buff;
2610        struct cudbg_ver_hdr *ver_hdr;
2611        u32 i, j;
2612        int rc;
2613
2614        rc = cudbg_get_buff(pdbg_init, dbg_buff,
2615                            sizeof(struct cudbg_ver_hdr) +
2616                            sizeof(struct cudbg_ulptx_la),
2617                            &temp_buff);
2618        if (rc)
2619                return rc;
2620
2621        ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
2622        ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
2623        ver_hdr->revision = CUDBG_ULPTX_LA_REV;
2624        ver_hdr->size = sizeof(struct cudbg_ulptx_la);
2625
2626        ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
2627                                                  sizeof(*ver_hdr));
2628        for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
2629                ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
2630                                                      ULP_TX_LA_RDPTR_0_A +
2631                                                      0x10 * i);
2632                ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
2633                                                      ULP_TX_LA_WRPTR_0_A +
2634                                                      0x10 * i);
2635                ulptx_la_buff->rddata[i] = t4_read_reg(padap,
2636                                                       ULP_TX_LA_RDDATA_0_A +
2637                                                       0x10 * i);
2638                for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
2639                        ulptx_la_buff->rd_data[i][j] =
2640                                t4_read_reg(padap,
2641                                            ULP_TX_LA_RDDATA_0_A + 0x10 * i);
2642        }
2643
        for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
                t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
                ulptx_la_buff->rdptr_asic[i] =
                                t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
                ulptx_la_buff->rddata_asic[i][0] =
                                t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
                ulptx_la_buff->rddata_asic[i][1] =
                                t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
                ulptx_la_buff->rddata_asic[i][2] =
                                t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
                ulptx_la_buff->rddata_asic[i][3] =
                                t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
                ulptx_la_buff->rddata_asic[i][4] =
                                t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
                ulptx_la_buff->rddata_asic[i][5] =
                                t4_read_reg(padap, PM_RX_BASE_ADDR);
        }

        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

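/* Dump the UP CIM indirect registers. Unlike the plain ireg tables, the
 * T5/T6 UP CIM arrays carry five words per entry (hence the
 * IREG_NUM_ELEM + 1 sizing): the indirect address/data register pair,
 * the starting local offset, the offset range, and an instance value
 * that selects the read pattern applied below.
 */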
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
                                  struct cudbg_buffer *dbg_buff,
                                  struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        u32 local_offset, local_range;
        struct ireg_buf *up_cim;
        u32 size, j, iter;
        u32 instance = 0;
        int i, rc, n;

        if (is_t5(padap->params.chip))
                n = sizeof(t5_up_cim_reg_array) /
                    ((IREG_NUM_ELEM + 1) * sizeof(u32));
        else if (is_t6(padap->params.chip))
                n = sizeof(t6_up_cim_reg_array) /
                    ((IREG_NUM_ELEM + 1) * sizeof(u32));
        else
                return CUDBG_STATUS_NOT_IMPLEMENTED;

        size = sizeof(struct ireg_buf) * n;
        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        up_cim = (struct ireg_buf *)temp_buff.data;
        for (i = 0; i < n; i++) {
                struct ireg_field *up_cim_reg = &up_cim->tp_pio;
                u32 *buff = up_cim->outbuf;

                if (is_t5(padap->params.chip)) {
                        up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
                        up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
                        up_cim_reg->ireg_local_offset =
                                                t5_up_cim_reg_array[i][2];
                        up_cim_reg->ireg_offset_range =
                                                t5_up_cim_reg_array[i][3];
                        instance = t5_up_cim_reg_array[i][4];
                } else if (is_t6(padap->params.chip)) {
                        up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
                        up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
                        up_cim_reg->ireg_local_offset =
                                                t6_up_cim_reg_array[i][2];
                        up_cim_reg->ireg_offset_range =
                                                t6_up_cim_reg_array[i][3];
                        instance = t6_up_cim_reg_array[i][4];
                }

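                /* Pick the read pattern for this entry. TSCH channel
                 * instances are read one word at a time with a 0x120
                 * stride between channels and TSCH class instances with a
                 * 0x10 stride; e.g. a channel entry with an offset range
                 * of 4 reads base, base + 0x120, base + 0x240 and
                 * base + 0x360. Everything else is read as one contiguous
                 * range starting at the local offset.
                 */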
                switch (instance) {
                case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
                        iter = up_cim_reg->ireg_offset_range;
                        local_offset = 0x120;
                        local_range = 1;
                        break;
                case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
                        iter = up_cim_reg->ireg_offset_range;
                        local_offset = 0x10;
                        local_range = 1;
                        break;
                default:
                        iter = 1;
                        local_offset = 0;
                        local_range = up_cim_reg->ireg_offset_range;
                        break;
                }

                for (j = 0; j < iter; j++, buff++) {
                        rc = t4_cim_read(padap,
                                         up_cim_reg->ireg_local_offset +
                                         (j * local_offset), local_range, buff);
                        if (rc) {
                                cudbg_put_buff(pdbg_init, &temp_buff);
                                return rc;
                        }
                }
                up_cim++;
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

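/* Collect the PBT tables through CIM reads: the dynamic entries at
 * CUDBG_CHAC_PBT_ADDR, the static entries in the window above them
 * (bit 6 set), the LRF entries, and the raw PBT data words.
 */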
int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_pbt_tables *pbt;
        int i, rc;
        u32 addr;

        rc = cudbg_get_buff(pdbg_init, dbg_buff,
                            sizeof(struct cudbg_pbt_tables),
                            &temp_buff);
        if (rc)
                return rc;

        pbt = (struct cudbg_pbt_tables *)temp_buff.data;
        /* PBT dynamic entries */
        addr = CUDBG_CHAC_PBT_ADDR;
        for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
                rc = t4_cim_read(padap, addr + (i * 4), 1,
                                 &pbt->pbt_dynamic[i]);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(pdbg_init, &temp_buff);
                        return rc;
                }
        }

        /* PBT static entries: static entries start when bit 6 is set */
        addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
        for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
                rc = t4_cim_read(padap, addr + (i * 4), 1,
                                 &pbt->pbt_static[i]);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(pdbg_init, &temp_buff);
                        return rc;
                }
        }

        /* LRF entries */
        addr = CUDBG_CHAC_PBT_LRF;
        for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
                rc = t4_cim_read(padap, addr + (i * 4), 1,
                                 &pbt->lrf_table[i]);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(pdbg_init, &temp_buff);
                        return rc;
                }
        }

        /* PBT data entries */
        addr = CUDBG_CHAC_PBT_DATA;
        for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
                rc = t4_cim_read(padap, addr + (i * 4), 1,
                                 &pbt->pbt_data[i]);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(pdbg_init, &temp_buff);
                        return rc;
                }
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

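/* Snapshot the adapter's circular mailbox command log. Starting the walk
 * at log->cursor and wrapping at log->size emits the ring oldest entry
 * first; unused slots (zero timestamp) are skipped, and each 64-bit
 * command flit is split into 32-bit hi/lo halves for the dump format.
 */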
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_mbox_log *mboxlog = NULL;
        struct cudbg_buffer temp_buff = { 0 };
        struct mbox_cmd_log *log = NULL;
        struct mbox_cmd *entry;
        unsigned int entry_idx;
        u16 mbox_cmds;
        int i, k, rc;
        u64 flit;
        u32 size;

        log = padap->mbox_log;
        mbox_cmds = padap->mbox_log->size;
        size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
        for (k = 0; k < mbox_cmds; k++) {
                entry_idx = log->cursor + k;
                if (entry_idx >= log->size)
                        entry_idx -= log->size;

                entry = mbox_cmd_log_entry(log, entry_idx);
                /* skip over unused entries */
                if (entry->timestamp == 0)
                        continue;

                memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
                for (i = 0; i < MBOX_LEN / 8; i++) {
                        flit = entry->cmd[i];
                        mboxlog->hi[i] = (u32)(flit >> 32);
                        mboxlog->lo[i] = (u32)flit;
                }
                mboxlog++;
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

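/* Dump the HMA indirect registers. The HMA block exists only on T6 and
 * later, so earlier chips report this entity as not found.
 */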
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct ireg_buf *hma_indr;
        int i, rc, n;
        u32 size;

        if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
                return CUDBG_STATUS_ENTITY_NOT_FOUND;

        n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
        size = sizeof(struct ireg_buf) * n;
        rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        hma_indr = (struct ireg_buf *)temp_buff.data;
        for (i = 0; i < n; i++) {
                struct ireg_field *hma_fli = &hma_indr->tp_pio;
                u32 *buff = hma_indr->outbuf;

                hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
                hma_fli->ireg_data = t6_hma_ireg_array[i][1];
                hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
                hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
                t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
                                 buff, hma_fli->ireg_offset_range,
                                 hma_fli->ireg_local_offset);
                hma_indr++;
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
