linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_zlib.h"

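/* Adapter memory regions (EDC0/1, MC0/1, HMA) collected for a memory dump */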
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
        { CUDBG_EDC0, cudbg_collect_edc0_meminfo },
        { CUDBG_EDC1, cudbg_collect_edc1_meminfo },
        { CUDBG_MC0, cudbg_collect_mc0_meminfo },
        { CUDBG_MC1, cudbg_collect_mc1_meminfo },
        { CUDBG_HMA, cudbg_collect_hma_meminfo },
};

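/* Hardware state entities (logs, queues, indirect registers, TCAMs, etc.)
 * collected for a hardware dump
 */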
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
        { CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
        { CUDBG_QDESC, cudbg_collect_qdesc },
        { CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
        { CUDBG_REG_DUMP, cudbg_collect_reg_dump },
        { CUDBG_CIM_LA, cudbg_collect_cim_la },
        { CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
        { CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
        { CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
        { CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
        { CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
        { CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
        { CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
        { CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
        { CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
        { CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
        { CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
        { CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
        { CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
        { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
        { CUDBG_RSS, cudbg_collect_rss },
        { CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
        { CUDBG_PATH_MTU, cudbg_collect_path_mtu },
        { CUDBG_PM_STATS, cudbg_collect_pm_stats },
        { CUDBG_HW_SCHED, cudbg_collect_hw_sched },
        { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
        { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
        { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
        { CUDBG_TP_LA, cudbg_collect_tp_la },
        { CUDBG_MEMINFO, cudbg_collect_meminfo },
        { CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
        { CUDBG_CLK, cudbg_collect_clk_info },
        { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
        { CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
        { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
        { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
        { CUDBG_TID_INFO, cudbg_collect_tid },
        { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
        { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
        { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
        { CUDBG_VPD_DATA, cudbg_collect_vpd_data },
        { CUDBG_LE_TCAM, cudbg_collect_le_tcam },
        { CUDBG_CCTRL, cudbg_collect_cctrl },
        { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
        { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
        { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
        { CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
        { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};

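/* On-adapter flash contents collected for a flash dump */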
static const struct cxgb4_collect_entity cxgb4_collect_flash_dump[] = {
        { CUDBG_FLASH, cudbg_collect_flash },
};

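/* Return the destination buffer size needed for the dump selected by @flag.
 * When zlib compression is available, a smaller fixed-size buffer
 * (CUDBG_DUMP_BUFF_SIZE) is enough to hold the compressed dump.
 */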
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
        u32 i, entity;
        u32 len = 0;
        u32 wsize;

        if (flag & CXGB4_ETH_DUMP_HW) {
                for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
                        entity = cxgb4_collect_hw_dump[i].entity;
                        len += cudbg_get_entity_length(adap, entity);
                }
        }

        if (flag & CXGB4_ETH_DUMP_MEM) {
                for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
                        entity = cxgb4_collect_mem_dump[i].entity;
                        len += cudbg_get_entity_length(adap, entity);
                }
        }

        if (flag & CXGB4_ETH_DUMP_FLASH)
                len += adap->params.sf_size;

        /* If compression is enabled, a smaller destination buffer is enough */
        wsize = cudbg_get_workspace_size();
        if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
                len = CUDBG_DUMP_BUFF_SIZE;

        return len;
}

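/* Run every collection callback in @e_arr, recording each entity's offset,
 * size, and error status in its entity header.  A failing entity is skipped
 * (its size is set to 0 and the buffer offset rewound) and collection
 * continues with the next entity.
 */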
static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
                                       struct cudbg_buffer *dbg_buff,
                                       const struct cxgb4_collect_entity *e_arr,
                                       u32 arr_size, void *buf, u32 *tot_size)
{
        struct cudbg_error cudbg_err = { 0 };
        struct cudbg_entity_hdr *entity_hdr;
        u32 i, total_size = 0;
        int ret;

        for (i = 0; i < arr_size; i++) {
                const struct cxgb4_collect_entity *e = &e_arr[i];

                entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
                entity_hdr->entity_type = e->entity;
                entity_hdr->start_offset = dbg_buff->offset;
                memset(&cudbg_err, 0, sizeof(struct cudbg_error));
                ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
                if (ret) {
                        entity_hdr->size = 0;
                        dbg_buff->offset = entity_hdr->start_offset;
                } else {
                        cudbg_align_debug_buffer(dbg_buff, entity_hdr);
                }

                /* Log error and continue with next entity */
                if (cudbg_err.sys_err)
                        ret = CUDBG_SYSTEM_ERROR;

                entity_hdr->hdr_flags = ret;
                entity_hdr->sys_err = cudbg_err.sys_err;
                entity_hdr->sys_warn = cudbg_err.sys_warn;
                total_size += entity_hdr->size;
        }

        *tot_size += total_size;
}

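/* Allocate a scratch buffer used to hold compressed entity data, with the
 * zlib deflate workspace carved out of the same allocation.
 */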
static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
{
        u32 workspace_size;

        workspace_size = cudbg_get_workspace_size();
        pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
                                           workspace_size);
        if (!pdbg_init->compress_buff)
                return -ENOMEM;

        pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
        pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
                               CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
        return 0;
}

static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
{
        if (pdbg_init->compress_buff)
                vfree(pdbg_init->compress_buff);
}

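/* Collect the dump selected by @flag into @buf.  The buffer begins with a
 * struct cudbg_hdr followed by CUDBG_MAX_ENTITY entity headers; the data of
 * each collected entity is appended after the headers, and its offset and
 * size are recorded in the corresponding entity header.
 */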
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
                        u32 flag)
{
        struct cudbg_buffer dbg_buff = { 0 };
        u32 size, min_size, total_size = 0;
        struct cudbg_init cudbg_init;
        struct cudbg_hdr *cudbg_hdr;
        int rc;

        size = *buf_size;

        memset(&cudbg_init, 0, sizeof(struct cudbg_init));
        cudbg_init.adap = adap;
        cudbg_init.outbuf = buf;
        cudbg_init.outbuf_size = size;

        dbg_buff.data = buf;
        dbg_buff.size = size;
        dbg_buff.offset = 0;

        cudbg_hdr = (struct cudbg_hdr *)buf;
        cudbg_hdr->signature = CUDBG_SIGNATURE;
        cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
        cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
        cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
        cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
        cudbg_hdr->chip_ver = adap->params.chip;
        cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;

        min_size = sizeof(struct cudbg_hdr) +
                   sizeof(struct cudbg_entity_hdr) *
                   cudbg_hdr->max_entities;
        if (size < min_size)
                return -ENOMEM;

        rc = cudbg_get_workspace_size();
        if (rc) {
                /* Zlib is available, so use zlib deflate */
                cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
                rc = cudbg_alloc_compress_buff(&cudbg_init);
                if (rc) {
                        /* Ignore error and continue without compression. */
                        dev_warn(adap->pdev_dev,
                                 "Failed to allocate compression buffer, ret: %d.  Continuing without compression.\n",
                                 rc);
                        cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
                        rc = 0;
                }
        } else {
                cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
        }

        cudbg_hdr->compress_type = cudbg_init.compress_type;
        dbg_buff.offset += min_size;
        total_size = dbg_buff.offset;

        if (flag & CXGB4_ETH_DUMP_HW)
                cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
                                           cxgb4_collect_hw_dump,
                                           ARRAY_SIZE(cxgb4_collect_hw_dump),
                                           buf,
                                           &total_size);

        if (flag & CXGB4_ETH_DUMP_MEM)
                cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
                                           cxgb4_collect_mem_dump,
                                           ARRAY_SIZE(cxgb4_collect_mem_dump),
                                           buf,
                                           &total_size);

        if (flag & CXGB4_ETH_DUMP_FLASH)
                cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
                                           cxgb4_collect_flash_dump,
                                           ARRAY_SIZE(cxgb4_collect_flash_dump),
                                           buf,
                                           &total_size);

        cudbg_free_compress_buff(&cudbg_init);
        cudbg_hdr->data_len = total_size;
        if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
                *buf_size = size;
        else
                *buf_size = total_size;
        return 0;
}

void cxgb4_init_ethtool_dump(struct adapter *adapter)
{
        adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
        adapter->eth_dump.version = adapter->params.fw_vers;
        adapter->eth_dump.len = 0;
}

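/* vmcore device dump callback: collect the full dump (CXGB4_ETH_DUMP_ALL)
 * into the buffer provided by the vmcore infrastructure.
 */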
static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
{
        struct adapter *adap = container_of(data, struct adapter, vmcoredd);
        u32 len = data->size;

        return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
}

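/* Size and register this adapter's debug dump with the vmcore device dump
 * facility.
 */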
int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
{
        struct vmcoredd_data *data = &adap->vmcoredd;
        u32 len;

        len = sizeof(struct cudbg_hdr) +
              sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
        len += CUDBG_DUMP_BUFF_SIZE;

        data->size = len;
        snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
                 cxgb4_driver_name, adap->name);
        data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;

        return vmcore_add_device_dump(data);
}