linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
   1/*
   2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
   3 *
   4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/delay.h>
  36#include "cxgb4.h"
  37#include "t4_regs.h"
  38#include "t4_values.h"
  39#include "t4fw_api.h"
  40
  41/**
  42 *      t4_wait_op_done_val - wait until an operation is completed
  43 *      @adapter: the adapter performing the operation
  44 *      @reg: the register to check for completion
  45 *      @mask: a single-bit field within @reg that indicates completion
  46 *      @polarity: the value of the field when the operation is completed
  47 *      @attempts: number of check iterations
  48 *      @delay: delay in usecs between iterations
  49 *      @valp: where to store the value of the register at completion time
  50 *
  51 *      Wait until an operation is completed by checking a bit in a register
  52 *      up to @attempts times.  If @valp is not NULL the value of the register
  53 *      at the time it indicated completion is stored there.  Returns 0 if the
  54 *      operation completes and -EAGAIN otherwise.
  55 */
  56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
  57                               int polarity, int attempts, int delay, u32 *valp)
  58{
  59        while (1) {
  60                u32 val = t4_read_reg(adapter, reg);
  61
  62                if (!!(val & mask) == polarity) {
  63                        if (valp)
  64                                *valp = val;
  65                        return 0;
  66                }
  67                if (--attempts == 0)
  68                        return -EAGAIN;
  69                if (delay)
  70                        udelay(delay);
  71        }
  72}
  73
  74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
  75                                  int polarity, int attempts, int delay)
  76{
  77        return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
  78                                   delay, NULL);
  79}
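
/* Usage sketch for the polling helpers above.  The register and bit names
 * below are illustrative placeholders, not definitions from t4_regs.h; a
 * real caller passes the register it is waiting on and the completion bit
 * within it.  This spins for up to 5 attempts, 10 usecs apart, waiting for
 * a BUSY-style bit to clear:
 *
 *      if (t4_wait_op_done(adap, PLACEHOLDER_OP_REG_A, PLACEHOLDER_BUSY_F,
 *                          0, 5, 10))
 *              return -ETIMEDOUT;
 */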
  80
  81/**
  82 *      t4_set_reg_field - set a register field to a value
  83 *      @adapter: the adapter to program
  84 *      @addr: the register address
  85 *      @mask: specifies the portion of the register to modify
  86 *      @val: the new value for the register field
  87 *
  88 *      Sets a register field specified by the supplied mask to the
  89 *      given value.
  90 */
  91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
  92                      u32 val)
  93{
  94        u32 v = t4_read_reg(adapter, addr) & ~mask;
  95
  96        t4_write_reg(adapter, addr, v | val);
  97        (void) t4_read_reg(adapter, addr);      /* flush */
  98}
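
/* Usage sketch: change one field of a register while leaving the other bits
 * untouched.  The mask/value macros here are placeholders (not real
 * t4_regs.h definitions); @mask selects the bits to modify and @val supplies
 * their new, already-positioned contents:
 *
 *      t4_set_reg_field(adap, PLACEHOLDER_CTRL_REG_A,
 *                       PLACEHOLDER_TIMER_M, PLACEHOLDER_TIMER_V(3));
 */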
  99
 100/**
 101 *      t4_read_indirect - read indirectly addressed registers
 102 *      @adap: the adapter
 103 *      @addr_reg: register holding the indirect address
 104 *      @data_reg: register holding the value of the indirect register
 105 *      @vals: where the read register values are stored
 106 *      @nregs: how many indirect registers to read
 107 *      @start_idx: index of first indirect register to read
 108 *
 109 *      Reads registers that are accessed indirectly through an address/data
 110 *      register pair.
 111 */
 112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
 113                             unsigned int data_reg, u32 *vals,
 114                             unsigned int nregs, unsigned int start_idx)
 115{
 116        while (nregs--) {
 117                t4_write_reg(adap, addr_reg, start_idx);
 118                *vals++ = t4_read_reg(adap, data_reg);
 119                start_idx++;
 120        }
 121}
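
/* Usage sketch: read four consecutive indirect registers starting at index
 * 0x10 through an address/data pair.  The two register names are
 * placeholders, not t4_regs.h definitions:
 *
 *      u32 vals[4];
 *
 *      t4_read_indirect(adap, PLACEHOLDER_ADDR_REG_A, PLACEHOLDER_DATA_REG_A,
 *                       vals, ARRAY_SIZE(vals), 0x10);
 */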
 122
 123/**
 124 *      t4_write_indirect - write indirectly addressed registers
 125 *      @adap: the adapter
 126 *      @addr_reg: register holding the indirect addresses
 127 *      @data_reg: register holding the value for the indirect registers
 128 *      @vals: values to write
 129 *      @nregs: how many indirect registers to write
  130 *      @start_idx: index of first indirect register to write
 131 *
 132 *      Writes a sequential block of registers that are accessed indirectly
 133 *      through an address/data register pair.
 134 */
 135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
 136                       unsigned int data_reg, const u32 *vals,
 137                       unsigned int nregs, unsigned int start_idx)
 138{
 139        while (nregs--) {
 140                t4_write_reg(adap, addr_reg, start_idx++);
 141                t4_write_reg(adap, data_reg, *vals++);
 142        }
 143}
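
/* Usage sketch: the mirror image of the read example above, writing the same
 * four values back through the same (placeholder) address/data register pair:
 *
 *      t4_write_indirect(adap, PLACEHOLDER_ADDR_REG_A, PLACEHOLDER_DATA_REG_A,
 *                        vals, ARRAY_SIZE(vals), 0x10);
 */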
 144
 145/*
 146 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 147 * mechanism.  This guarantees that we get the real value even if we're
 148 * operating within a Virtual Machine and the Hypervisor is trapping our
 149 * Configuration Space accesses.
 150 */
 151void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
 152{
 153        u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
 154
 155        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
 156                req |= ENABLE_F;
 157        else
 158                req |= T6_ENABLE_F;
 159
 160        if (is_t4(adap->params.chip))
 161                req |= LOCALCFG_F;
 162
 163        t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
 164        *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
 165
 166        /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
 167         * Configuration Space read.  (None of the other fields matter when
 168         * ENABLE is 0 so a simple register write is easier than a
 169         * read-modify-write via t4_set_reg_field().)
 170         */
 171        t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
 172}
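
/* Usage sketch: read the Vendor/Device ID dword of our own function through
 * the backdoor, bypassing any hypervisor trapping of Configuration Space
 * cycles.  PCI_VENDOR_ID is the standard offset-0 constant from
 * <uapi/linux/pci_regs.h>:
 *
 *      u32 id;
 *
 *      t4_hw_pci_read_cfg4(adap, PCI_VENDOR_ID, &id);
 *      (Vendor ID is in bits 15:0, Device ID in bits 31:16.)
 */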
 173
 174/*
 175 * t4_report_fw_error - report firmware error
 176 * @adap: the adapter
 177 *
 178 * The adapter firmware can indicate error conditions to the host.
 179 * If the firmware has indicated an error, print out the reason for
 180 * the firmware error.
 181 */
 182static void t4_report_fw_error(struct adapter *adap)
 183{
 184        static const char *const reason[] = {
 185                "Crash",                        /* PCIE_FW_EVAL_CRASH */
 186                "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
 187                "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
 188                "During Device Initialization", /* PCIE_FW_EVAL_INIT */
 189                "Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
 190                "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
 191                "Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
 192                "Reserved",                     /* reserved */
 193        };
 194        u32 pcie_fw;
 195
 196        pcie_fw = t4_read_reg(adap, PCIE_FW_A);
 197        if (pcie_fw & PCIE_FW_ERR_F)
 198                dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
 199                        reason[PCIE_FW_EVAL_G(pcie_fw)]);
 200}
 201
 202/*
 203 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 204 */
 205static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
 206                         u32 mbox_addr)
 207{
 208        for ( ; nflit; nflit--, mbox_addr += 8)
 209                *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
 210}
 211
 212/*
 213 * Handle a FW assertion reported in a mailbox.
 214 */
 215static void fw_asrt(struct adapter *adap, u32 mbox_addr)
 216{
 217        struct fw_debug_cmd asrt;
 218
 219        get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
 220        dev_alert(adap->pdev_dev,
 221                  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
 222                  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
 223                  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
 224}
 225
 226static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
 227{
 228        dev_err(adap->pdev_dev,
 229                "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
 230                (unsigned long long)t4_read_reg64(adap, data_reg),
 231                (unsigned long long)t4_read_reg64(adap, data_reg + 8),
 232                (unsigned long long)t4_read_reg64(adap, data_reg + 16),
 233                (unsigned long long)t4_read_reg64(adap, data_reg + 24),
 234                (unsigned long long)t4_read_reg64(adap, data_reg + 32),
 235                (unsigned long long)t4_read_reg64(adap, data_reg + 40),
 236                (unsigned long long)t4_read_reg64(adap, data_reg + 48),
 237                (unsigned long long)t4_read_reg64(adap, data_reg + 56));
 238}
 239
 240/**
 241 *      t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 242 *      @adap: the adapter
 243 *      @mbox: index of the mailbox to use
 244 *      @cmd: the command to write
 245 *      @size: command length in bytes
 246 *      @rpl: where to optionally store the reply
 247 *      @sleep_ok: if true we may sleep while awaiting command completion
  248 *      @timeout: time to wait (in ms) for command to finish before timing out
 249 *
 250 *      Sends the given command to FW through the selected mailbox and waits
 251 *      for the FW to execute the command.  If @rpl is not %NULL it is used to
 252 *      store the FW's reply to the command.  The command and its optional
 253 *      reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 254 *      to respond.  @sleep_ok determines whether we may sleep while awaiting
 255 *      the response.  If sleeping is allowed we use progressive backoff
 256 *      otherwise we spin.
 257 *
 258 *      The return value is 0 on success or a negative errno on failure.  A
 259 *      failure can happen either because we are not able to execute the
 260 *      command or FW executes it but signals an error.  In the latter case
 261 *      the return value is the error code indicated by FW (negated).
 262 */
 263int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 264                            int size, void *rpl, bool sleep_ok, int timeout)
 265{
 266        static const int delay[] = {
 267                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
 268        };
 269
 270        u32 v;
 271        u64 res;
 272        int i, ms, delay_idx;
 273        const __be64 *p = cmd;
 274        u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
 275        u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
 276
 277        if ((size & 15) || size > MBOX_LEN)
 278                return -EINVAL;
 279
 280        /*
 281         * If the device is off-line, as in EEH, commands will time out.
 282         * Fail them early so we don't waste time waiting.
 283         */
 284        if (adap->pdev->error_state != pci_channel_io_normal)
 285                return -EIO;
 286
 287        v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 288        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
 289                v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 290
 291        if (v != MBOX_OWNER_DRV)
 292                return v ? -EBUSY : -ETIMEDOUT;
 293
 294        for (i = 0; i < size; i += 8)
 295                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 296
 297        t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
 298        t4_read_reg(adap, ctl_reg);          /* flush write */
 299
 300        delay_idx = 0;
 301        ms = delay[0];
 302
 303        for (i = 0; i < timeout; i += ms) {
 304                if (sleep_ok) {
 305                        ms = delay[delay_idx];  /* last element may repeat */
 306                        if (delay_idx < ARRAY_SIZE(delay) - 1)
 307                                delay_idx++;
 308                        msleep(ms);
 309                } else
 310                        mdelay(ms);
 311
 312                v = t4_read_reg(adap, ctl_reg);
 313                if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
 314                        if (!(v & MBMSGVALID_F)) {
 315                                t4_write_reg(adap, ctl_reg, 0);
 316                                continue;
 317                        }
 318
 319                        res = t4_read_reg64(adap, data_reg);
 320                        if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
 321                                fw_asrt(adap, data_reg);
 322                                res = FW_CMD_RETVAL_V(EIO);
 323                        } else if (rpl) {
 324                                get_mbox_rpl(adap, rpl, size / 8, data_reg);
 325                        }
 326
 327                        if (FW_CMD_RETVAL_G((int)res))
 328                                dump_mbox(adap, mbox, data_reg);
 329                        t4_write_reg(adap, ctl_reg, 0);
 330                        return -FW_CMD_RETVAL_G((int)res);
 331                }
 332        }
 333
 334        dump_mbox(adap, mbox, data_reg);
 335        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
 336                *(const u8 *)cmd, mbox);
 337        t4_report_fw_error(adap);
 338        return -ETIMEDOUT;
 339}
 340
 341int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
 342                    void *rpl, bool sleep_ok)
 343{
 344        return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
 345                                       FW_CMD_MAX_TIMEOUT);
 346}
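
/* Usage sketch for the mailbox path: a caller fills in a firmware command
 * structure from t4fw_api.h, sends it through its mailbox, and (optionally)
 * reuses the same structure for the reply.  The command type below is a
 * generic placeholder; t4_read_pcie_cfg4() later in this file shows a
 * complete real example using struct fw_ldst_cmd:
 *
 *      struct fw_placeholder_cmd c;
 *      int ret;
 *
 *      memset(&c, 0, sizeof(c));
 *      ... fill in the op_to_*, length and payload fields ...
 *      ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
 *      if (ret)
 *              return ret;     (negated FW retval or transport error)
 */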
 347
 348/**
 349 *      t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 350 *      @adap: the adapter
 351 *      @win: PCI-E Memory Window to use
 352 *      @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 353 *      @addr: address within indicated memory type
 354 *      @len: amount of memory to transfer
 355 *      @hbuf: host memory buffer
 356 *      @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 357 *
 358 *      Reads/writes an [almost] arbitrary memory region in the firmware: the
 359 *      firmware memory address and host buffer must be aligned on 32-bit
  360 *      boundaries; the length may be arbitrary.  The memory is transferred as
 361 *      a raw byte sequence from/to the firmware's memory.  If this memory
 362 *      contains data structures which contain multi-byte integers, it's the
 363 *      caller's responsibility to perform appropriate byte order conversions.
 364 */
 365int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 366                 u32 len, void *hbuf, int dir)
 367{
 368        u32 pos, offset, resid, memoffset;
 369        u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
 370        u32 *buf;
 371
 372        /* Argument sanity checks ...
 373         */
 374        if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
 375                return -EINVAL;
 376        buf = (u32 *)hbuf;
 377
 378        /* It's convenient to be able to handle lengths which aren't a
 379         * multiple of 32-bits because we often end up transferring files to
 380         * the firmware.  So we'll handle that by normalizing the length here
 381         * and then handling any residual transfer at the end.
 382         */
 383        resid = len & 0x3;
 384        len -= resid;
 385
 386        /* Offset into the region of memory which is being accessed
 387         * MEM_EDC0 = 0
 388         * MEM_EDC1 = 1
 389         * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
 390         * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
 391         */
 392        edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
 393        if (mtype != MEM_MC1)
 394                memoffset = (mtype * (edc_size * 1024 * 1024));
 395        else {
 396                mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
 397                                                      MA_EXT_MEMORY0_BAR_A));
 398                memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
 399        }
 400
 401        /* Determine the PCIE_MEM_ACCESS_OFFSET */
 402        addr = addr + memoffset;
 403
 404        /* Each PCI-E Memory Window is programmed with a window size -- or
 405         * "aperture" -- which controls the granularity of its mapping onto
 406         * adapter memory.  We need to grab that aperture in order to know
 407         * how to use the specified window.  The window is also programmed
 408         * with the base address of the Memory Window in BAR0's address
 409         * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
 410         * the address is relative to BAR0.
 411         */
 412        mem_reg = t4_read_reg(adap,
 413                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
 414                                                  win));
 415        mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
 416        mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
 417        if (is_t4(adap->params.chip))
 418                mem_base -= adap->t4_bar0;
 419        win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
 420
 421        /* Calculate our initial PCI-E Memory Window Position and Offset into
 422         * that Window.
 423         */
 424        pos = addr & ~(mem_aperture-1);
 425        offset = addr - pos;
 426
 427        /* Set up initial PCI-E Memory Window to cover the start of our
 428         * transfer.  (Read it back to ensure that changes propagate before we
 429         * attempt to use the new value.)
 430         */
 431        t4_write_reg(adap,
 432                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
 433                     pos | win_pf);
 434        t4_read_reg(adap,
 435                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
 436
 437        /* Transfer data to/from the adapter as long as there's an integral
 438         * number of 32-bit transfers to complete.
 439         *
 440         * A note on Endianness issues:
 441         *
 442         * The "register" reads and writes below from/to the PCI-E Memory
 443         * Window invoke the standard adapter Big-Endian to PCI-E Link
  444         * Little-Endian "swizzle."  As a result, if we have the following
 445         * data in adapter memory:
 446         *
 447         *     Memory:  ... | b0 | b1 | b2 | b3 | ...
 448         *     Address:      i+0  i+1  i+2  i+3
 449         *
 450         * Then a read of the adapter memory via the PCI-E Memory Window
 451         * will yield:
 452         *
 453         *     x = readl(i)
 454         *         31                  0
 455         *         [ b3 | b2 | b1 | b0 ]
 456         *
 457         * If this value is stored into local memory on a Little-Endian system
 458         * it will show up correctly in local memory as:
 459         *
 460         *     ( ..., b0, b1, b2, b3, ... )
 461         *
 462         * But on a Big-Endian system, the store will show up in memory
 463         * incorrectly swizzled as:
 464         *
 465         *     ( ..., b3, b2, b1, b0, ... )
 466         *
 467         * So we need to account for this in the reads and writes to the
 468         * PCI-E Memory Window below by undoing the register read/write
  469         * swizzles.
 470         */
 471        while (len > 0) {
 472                if (dir == T4_MEMORY_READ)
 473                        *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
 474                                                mem_base + offset));
 475                else
 476                        t4_write_reg(adap, mem_base + offset,
 477                                     (__force u32)cpu_to_le32(*buf++));
 478                offset += sizeof(__be32);
 479                len -= sizeof(__be32);
 480
 481                /* If we've reached the end of our current window aperture,
  482                 * move the PCI-E Memory Window on to the next.  Note that
  483                 * doing this here even after "len" reaches 0 allows us to set up
 484                 * the PCI-E Memory Window for a possible final residual
 485                 * transfer below ...
 486                 */
 487                if (offset == mem_aperture) {
 488                        pos += mem_aperture;
 489                        offset = 0;
 490                        t4_write_reg(adap,
 491                                PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
 492                                                    win), pos | win_pf);
 493                        t4_read_reg(adap,
 494                                PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
 495                                                    win));
 496                }
 497        }
 498
 499        /* If the original transfer had a length which wasn't a multiple of
 500         * 32-bits, now's where we need to finish off the transfer of the
 501         * residual amount.  The PCI-E Memory Window has already been moved
 502         * above (if necessary) to cover this final transfer.
 503         */
 504        if (resid) {
 505                union {
 506                        u32 word;
 507                        char byte[4];
 508                } last;
 509                unsigned char *bp;
 510                int i;
 511
 512                if (dir == T4_MEMORY_READ) {
 513                        last.word = le32_to_cpu(
 514                                        (__force __le32)t4_read_reg(adap,
 515                                                mem_base + offset));
 516                        for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
 517                                bp[i] = last.byte[i];
 518                } else {
 519                        last.word = *buf;
 520                        for (i = resid; i < 4; i++)
 521                                last.byte[i] = 0;
 522                        t4_write_reg(adap, mem_base + offset,
 523                                     (__force u32)cpu_to_le32(last.word));
 524                }
 525        }
 526
 527        return 0;
 528}
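
/* Usage sketch: read the first 256 bytes of EDC0 through memory window 0,
 * assumed here to be the default utility window programmed via
 * t4_setup_memwin() below.  A u32 array satisfies the 32-bit alignment
 * requirement for both the adapter address and the host buffer:
 *
 *      u32 data[64];
 *      int ret;
 *
 *      ret = t4_memory_rw(adap, 0, MEM_EDC0, 0, sizeof(data), data,
 *                         T4_MEMORY_READ);
 */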
 529
 530/* Return the specified PCI-E Configuration Space register from our Physical
 531 * Function.  We try first via a Firmware LDST Command since we prefer to let
 532 * the firmware own all of these registers, but if that fails we go for it
 533 * directly ourselves.
 534 */
 535u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
 536{
 537        u32 val, ldst_addrspace;
 538
  539        /* Construct and send the Firmware LDST Command to retrieve the
  540         * specified PCI-E Configuration Space register.
  541         */
 542        struct fw_ldst_cmd ldst_cmd;
 543        int ret;
 544
 545        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
 546        ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
 547        ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
 548                                               FW_CMD_REQUEST_F |
 549                                               FW_CMD_READ_F |
 550                                               ldst_addrspace);
 551        ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
 552        ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
 553        ldst_cmd.u.pcie.ctrl_to_fn =
 554                (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
 555        ldst_cmd.u.pcie.r = reg;
 556
 557        /* If the LDST Command succeeds, return the result, otherwise
 558         * fall through to reading it directly ourselves ...
 559         */
 560        ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
 561                         &ldst_cmd);
 562        if (ret == 0)
 563                val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
 564        else
 565                /* Read the desired Configuration Space register via the PCI-E
 566                 * Backdoor mechanism.
 567                 */
 568                t4_hw_pci_read_cfg4(adap, reg, &val);
 569        return val;
 570}
 571
  572/* Get the PCI-E Memory Window address based on the window base passed in.
  573 * The window aperture is currently unhandled, but there is no use case for
  574 * it right now.
  575 */
 576static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
 577                         u32 memwin_base)
 578{
 579        u32 ret;
 580
 581        if (is_t4(adap->params.chip)) {
 582                u32 bar0;
 583
 584                /* Truncation intentional: we only read the bottom 32-bits of
 585                 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
 586                 * mechanism to read BAR0 instead of using
 587                 * pci_resource_start() because we could be operating from
 588                 * within a Virtual Machine which is trapping our accesses to
 589                 * our Configuration Space and we need to set up the PCI-E
 590                 * Memory Window decoders with the actual addresses which will
 591                 * be coming across the PCI-E link.
 592                 */
 593                bar0 = t4_read_pcie_cfg4(adap, pci_base);
 594                bar0 &= pci_mask;
 595                adap->t4_bar0 = bar0;
 596
 597                ret = bar0 + memwin_base;
 598        } else {
  599                /* For T5 and later, only the relative offset inside the PCIe BAR is passed */
 600                ret = memwin_base;
 601        }
 602        return ret;
 603}
 604
 605/* Get the default utility window (win0) used by everyone */
 606u32 t4_get_util_window(struct adapter *adap)
 607{
 608        return t4_get_window(adap, PCI_BASE_ADDRESS_0,
 609                             PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
 610}
 611
 612/* Set up memory window for accessing adapter memory ranges.  (Read
 613 * back MA register to ensure that changes propagate before we attempt
 614 * to use the new values.)
 615 */
 616void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
 617{
 618        t4_write_reg(adap,
 619                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
 620                     memwin_base | BIR_V(0) |
 621                     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
 622        t4_read_reg(adap,
 623                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
 624}
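
/* Usage sketch: the probe path typically pairs the two helpers above,
 * computing the utility window base and then programming window 0 with it
 * (window index 0 is an assumption made here for illustration):
 *
 *      u32 nic_win_base = t4_get_util_window(adap);
 *
 *      t4_setup_memwin(adap, nic_win_base, 0);
 */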
 625
 626/**
  627 *      t4_get_regs_len - return the size of the chip's register set
 628 *      @adapter: the adapter
 629 *
 630 *      Returns the size of the chip's BAR0 register space.
 631 */
 632unsigned int t4_get_regs_len(struct adapter *adapter)
 633{
 634        unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
 635
 636        switch (chip_version) {
 637        case CHELSIO_T4:
 638                return T4_REGMAP_SIZE;
 639
 640        case CHELSIO_T5:
 641        case CHELSIO_T6:
 642                return T5_REGMAP_SIZE;
 643        }
 644
 645        dev_err(adapter->pdev_dev,
 646                "Unsupported chip version %d\n", chip_version);
 647        return 0;
 648}
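
/* Usage sketch: an ethtool-style register dump sizes its buffer with
 * t4_get_regs_len() and fills it with t4_get_regs() (defined next).  The
 * allocation shown is illustrative only:
 *
 *      unsigned int buf_len = t4_get_regs_len(adap);
 *      void *buf = kvzalloc(buf_len, GFP_KERNEL);
 *
 *      if (buf)
 *              t4_get_regs(adap, buf, buf_len);
 */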
 649
 650/**
 651 *      t4_get_regs - read chip registers into provided buffer
 652 *      @adap: the adapter
 653 *      @buf: register buffer
 654 *      @buf_size: size (in bytes) of register buffer
 655 *
 656 *      If the provided register buffer isn't large enough for the chip's
 657 *      full register range, the register dump will be truncated to the
 658 *      register buffer's size.
 659 */
 660void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 661{
 662        static const unsigned int t4_reg_ranges[] = {
 663                0x1008, 0x1108,
 664                0x1180, 0x11b4,
 665                0x11fc, 0x123c,
 666                0x1300, 0x173c,
 667                0x1800, 0x18fc,
 668                0x3000, 0x305c,
 669                0x3068, 0x30d8,
 670                0x30e0, 0x5924,
 671                0x5960, 0x59d4,
 672                0x5a00, 0x5af8,
 673                0x6000, 0x6098,
 674                0x6100, 0x6150,
 675                0x6200, 0x6208,
 676                0x6240, 0x6248,
 677                0x6280, 0x6338,
 678                0x6370, 0x638c,
 679                0x6400, 0x643c,
 680                0x6500, 0x6524,
 681                0x6a00, 0x6a38,
 682                0x6a60, 0x6a78,
 683                0x6b00, 0x6b84,
 684                0x6bf0, 0x6c84,
 685                0x6cf0, 0x6d84,
 686                0x6df0, 0x6e84,
 687                0x6ef0, 0x6f84,
 688                0x6ff0, 0x7084,
 689                0x70f0, 0x7184,
 690                0x71f0, 0x7284,
 691                0x72f0, 0x7384,
 692                0x73f0, 0x7450,
 693                0x7500, 0x7530,
 694                0x7600, 0x761c,
 695                0x7680, 0x76cc,
 696                0x7700, 0x7798,
 697                0x77c0, 0x77fc,
 698                0x7900, 0x79fc,
 699                0x7b00, 0x7c38,
 700                0x7d00, 0x7efc,
 701                0x8dc0, 0x8e1c,
 702                0x8e30, 0x8e78,
 703                0x8ea0, 0x8f6c,
 704                0x8fc0, 0x9074,
 705                0x90fc, 0x90fc,
 706                0x9400, 0x9458,
 707                0x9600, 0x96bc,
 708                0x9800, 0x9808,
 709                0x9820, 0x983c,
 710                0x9850, 0x9864,
 711                0x9c00, 0x9c6c,
 712                0x9c80, 0x9cec,
 713                0x9d00, 0x9d6c,
 714                0x9d80, 0x9dec,
 715                0x9e00, 0x9e6c,
 716                0x9e80, 0x9eec,
 717                0x9f00, 0x9f6c,
 718                0x9f80, 0x9fec,
 719                0xd004, 0xd03c,
 720                0xdfc0, 0xdfe0,
 721                0xe000, 0xea7c,
 722                0xf000, 0x11110,
 723                0x11118, 0x11190,
 724                0x19040, 0x1906c,
 725                0x19078, 0x19080,
 726                0x1908c, 0x19124,
 727                0x19150, 0x191b0,
 728                0x191d0, 0x191e8,
 729                0x19238, 0x1924c,
 730                0x193f8, 0x19474,
 731                0x19490, 0x194f8,
 732                0x19800, 0x19f4c,
 733                0x1a000, 0x1a06c,
 734                0x1a0b0, 0x1a120,
 735                0x1a128, 0x1a138,
 736                0x1a190, 0x1a1c4,
 737                0x1a1fc, 0x1a1fc,
 738                0x1e040, 0x1e04c,
 739                0x1e284, 0x1e28c,
 740                0x1e2c0, 0x1e2c0,
 741                0x1e2e0, 0x1e2e0,
 742                0x1e300, 0x1e384,
 743                0x1e3c0, 0x1e3c8,
 744                0x1e440, 0x1e44c,
 745                0x1e684, 0x1e68c,
 746                0x1e6c0, 0x1e6c0,
 747                0x1e6e0, 0x1e6e0,
 748                0x1e700, 0x1e784,
 749                0x1e7c0, 0x1e7c8,
 750                0x1e840, 0x1e84c,
 751                0x1ea84, 0x1ea8c,
 752                0x1eac0, 0x1eac0,
 753                0x1eae0, 0x1eae0,
 754                0x1eb00, 0x1eb84,
 755                0x1ebc0, 0x1ebc8,
 756                0x1ec40, 0x1ec4c,
 757                0x1ee84, 0x1ee8c,
 758                0x1eec0, 0x1eec0,
 759                0x1eee0, 0x1eee0,
 760                0x1ef00, 0x1ef84,
 761                0x1efc0, 0x1efc8,
 762                0x1f040, 0x1f04c,
 763                0x1f284, 0x1f28c,
 764                0x1f2c0, 0x1f2c0,
 765                0x1f2e0, 0x1f2e0,
 766                0x1f300, 0x1f384,
 767                0x1f3c0, 0x1f3c8,
 768                0x1f440, 0x1f44c,
 769                0x1f684, 0x1f68c,
 770                0x1f6c0, 0x1f6c0,
 771                0x1f6e0, 0x1f6e0,
 772                0x1f700, 0x1f784,
 773                0x1f7c0, 0x1f7c8,
 774                0x1f840, 0x1f84c,
 775                0x1fa84, 0x1fa8c,
 776                0x1fac0, 0x1fac0,
 777                0x1fae0, 0x1fae0,
 778                0x1fb00, 0x1fb84,
 779                0x1fbc0, 0x1fbc8,
 780                0x1fc40, 0x1fc4c,
 781                0x1fe84, 0x1fe8c,
 782                0x1fec0, 0x1fec0,
 783                0x1fee0, 0x1fee0,
 784                0x1ff00, 0x1ff84,
 785                0x1ffc0, 0x1ffc8,
 786                0x20000, 0x2002c,
 787                0x20100, 0x2013c,
 788                0x20190, 0x201c8,
 789                0x20200, 0x20318,
 790                0x20400, 0x20528,
 791                0x20540, 0x20614,
 792                0x21000, 0x21040,
 793                0x2104c, 0x21060,
 794                0x210c0, 0x210ec,
 795                0x21200, 0x21268,
 796                0x21270, 0x21284,
 797                0x212fc, 0x21388,
 798                0x21400, 0x21404,
 799                0x21500, 0x21518,
 800                0x2152c, 0x2153c,
 801                0x21550, 0x21554,
 802                0x21600, 0x21600,
 803                0x21608, 0x21628,
 804                0x21630, 0x2163c,
 805                0x21700, 0x2171c,
 806                0x21780, 0x2178c,
 807                0x21800, 0x21c38,
 808                0x21c80, 0x21d7c,
 809                0x21e00, 0x21e04,
 810                0x22000, 0x2202c,
 811                0x22100, 0x2213c,
 812                0x22190, 0x221c8,
 813                0x22200, 0x22318,
 814                0x22400, 0x22528,
 815                0x22540, 0x22614,
 816                0x23000, 0x23040,
 817                0x2304c, 0x23060,
 818                0x230c0, 0x230ec,
 819                0x23200, 0x23268,
 820                0x23270, 0x23284,
 821                0x232fc, 0x23388,
 822                0x23400, 0x23404,
 823                0x23500, 0x23518,
 824                0x2352c, 0x2353c,
 825                0x23550, 0x23554,
 826                0x23600, 0x23600,
 827                0x23608, 0x23628,
 828                0x23630, 0x2363c,
 829                0x23700, 0x2371c,
 830                0x23780, 0x2378c,
 831                0x23800, 0x23c38,
 832                0x23c80, 0x23d7c,
 833                0x23e00, 0x23e04,
 834                0x24000, 0x2402c,
 835                0x24100, 0x2413c,
 836                0x24190, 0x241c8,
 837                0x24200, 0x24318,
 838                0x24400, 0x24528,
 839                0x24540, 0x24614,
 840                0x25000, 0x25040,
 841                0x2504c, 0x25060,
 842                0x250c0, 0x250ec,
 843                0x25200, 0x25268,
 844                0x25270, 0x25284,
 845                0x252fc, 0x25388,
 846                0x25400, 0x25404,
 847                0x25500, 0x25518,
 848                0x2552c, 0x2553c,
 849                0x25550, 0x25554,
 850                0x25600, 0x25600,
 851                0x25608, 0x25628,
 852                0x25630, 0x2563c,
 853                0x25700, 0x2571c,
 854                0x25780, 0x2578c,
 855                0x25800, 0x25c38,
 856                0x25c80, 0x25d7c,
 857                0x25e00, 0x25e04,
 858                0x26000, 0x2602c,
 859                0x26100, 0x2613c,
 860                0x26190, 0x261c8,
 861                0x26200, 0x26318,
 862                0x26400, 0x26528,
 863                0x26540, 0x26614,
 864                0x27000, 0x27040,
 865                0x2704c, 0x27060,
 866                0x270c0, 0x270ec,
 867                0x27200, 0x27268,
 868                0x27270, 0x27284,
 869                0x272fc, 0x27388,
 870                0x27400, 0x27404,
 871                0x27500, 0x27518,
 872                0x2752c, 0x2753c,
 873                0x27550, 0x27554,
 874                0x27600, 0x27600,
 875                0x27608, 0x27628,
 876                0x27630, 0x2763c,
 877                0x27700, 0x2771c,
 878                0x27780, 0x2778c,
 879                0x27800, 0x27c38,
 880                0x27c80, 0x27d7c,
 881                0x27e00, 0x27e04,
 882        };
 883
 884        static const unsigned int t5_reg_ranges[] = {
 885                0x1008, 0x1148,
 886                0x1180, 0x11b4,
 887                0x11fc, 0x123c,
 888                0x1280, 0x173c,
 889                0x1800, 0x18fc,
 890                0x3000, 0x3028,
 891                0x3068, 0x30d8,
 892                0x30e0, 0x30fc,
 893                0x3140, 0x357c,
 894                0x35a8, 0x35cc,
 895                0x35ec, 0x35ec,
 896                0x3600, 0x5624,
 897                0x56cc, 0x575c,
 898                0x580c, 0x5814,
 899                0x5890, 0x58bc,
 900                0x5940, 0x59dc,
 901                0x59fc, 0x5a18,
 902                0x5a60, 0x5a9c,
 903                0x5b94, 0x5bfc,
 904                0x6000, 0x6040,
 905                0x6058, 0x614c,
 906                0x7700, 0x7798,
 907                0x77c0, 0x78fc,
 908                0x7b00, 0x7c54,
 909                0x7d00, 0x7efc,
 910                0x8dc0, 0x8de0,
 911                0x8df8, 0x8e84,
 912                0x8ea0, 0x8f84,
 913                0x8fc0, 0x90f8,
 914                0x9400, 0x9470,
 915                0x9600, 0x96f4,
 916                0x9800, 0x9808,
 917                0x9820, 0x983c,
 918                0x9850, 0x9864,
 919                0x9c00, 0x9c6c,
 920                0x9c80, 0x9cec,
 921                0x9d00, 0x9d6c,
 922                0x9d80, 0x9dec,
 923                0x9e00, 0x9e6c,
 924                0x9e80, 0x9eec,
 925                0x9f00, 0x9f6c,
 926                0x9f80, 0xa020,
 927                0xd004, 0xd03c,
 928                0xdfc0, 0xdfe0,
 929                0xe000, 0x11088,
 930                0x1109c, 0x11110,
 931                0x11118, 0x1117c,
 932                0x11190, 0x11204,
 933                0x19040, 0x1906c,
 934                0x19078, 0x19080,
 935                0x1908c, 0x19124,
 936                0x19150, 0x191b0,
 937                0x191d0, 0x191e8,
 938                0x19238, 0x19290,
 939                0x193f8, 0x19474,
 940                0x19490, 0x194cc,
 941                0x194f0, 0x194f8,
 942                0x19c00, 0x19c60,
 943                0x19c94, 0x19e10,
 944                0x19e50, 0x19f34,
 945                0x19f40, 0x19f50,
 946                0x19f90, 0x19fe4,
 947                0x1a000, 0x1a06c,
 948                0x1a0b0, 0x1a120,
 949                0x1a128, 0x1a138,
 950                0x1a190, 0x1a1c4,
 951                0x1a1fc, 0x1a1fc,
 952                0x1e008, 0x1e00c,
 953                0x1e040, 0x1e04c,
 954                0x1e284, 0x1e290,
 955                0x1e2c0, 0x1e2c0,
 956                0x1e2e0, 0x1e2e0,
 957                0x1e300, 0x1e384,
 958                0x1e3c0, 0x1e3c8,
 959                0x1e408, 0x1e40c,
 960                0x1e440, 0x1e44c,
 961                0x1e684, 0x1e690,
 962                0x1e6c0, 0x1e6c0,
 963                0x1e6e0, 0x1e6e0,
 964                0x1e700, 0x1e784,
 965                0x1e7c0, 0x1e7c8,
 966                0x1e808, 0x1e80c,
 967                0x1e840, 0x1e84c,
 968                0x1ea84, 0x1ea90,
 969                0x1eac0, 0x1eac0,
 970                0x1eae0, 0x1eae0,
 971                0x1eb00, 0x1eb84,
 972                0x1ebc0, 0x1ebc8,
 973                0x1ec08, 0x1ec0c,
 974                0x1ec40, 0x1ec4c,
 975                0x1ee84, 0x1ee90,
 976                0x1eec0, 0x1eec0,
 977                0x1eee0, 0x1eee0,
 978                0x1ef00, 0x1ef84,
 979                0x1efc0, 0x1efc8,
 980                0x1f008, 0x1f00c,
 981                0x1f040, 0x1f04c,
 982                0x1f284, 0x1f290,
 983                0x1f2c0, 0x1f2c0,
 984                0x1f2e0, 0x1f2e0,
 985                0x1f300, 0x1f384,
 986                0x1f3c0, 0x1f3c8,
 987                0x1f408, 0x1f40c,
 988                0x1f440, 0x1f44c,
 989                0x1f684, 0x1f690,
 990                0x1f6c0, 0x1f6c0,
 991                0x1f6e0, 0x1f6e0,
 992                0x1f700, 0x1f784,
 993                0x1f7c0, 0x1f7c8,
 994                0x1f808, 0x1f80c,
 995                0x1f840, 0x1f84c,
 996                0x1fa84, 0x1fa90,
 997                0x1fac0, 0x1fac0,
 998                0x1fae0, 0x1fae0,
 999                0x1fb00, 0x1fb84,
1000                0x1fbc0, 0x1fbc8,
1001                0x1fc08, 0x1fc0c,
1002                0x1fc40, 0x1fc4c,
1003                0x1fe84, 0x1fe90,
1004                0x1fec0, 0x1fec0,
1005                0x1fee0, 0x1fee0,
1006                0x1ff00, 0x1ff84,
1007                0x1ffc0, 0x1ffc8,
1008                0x30000, 0x30030,
1009                0x30100, 0x30144,
1010                0x30190, 0x301d0,
1011                0x30200, 0x30318,
1012                0x30400, 0x3052c,
1013                0x30540, 0x3061c,
1014                0x30800, 0x30834,
1015                0x308c0, 0x30908,
1016                0x30910, 0x309ac,
1017                0x30a00, 0x30a2c,
1018                0x30a44, 0x30a50,
1019                0x30a74, 0x30c24,
1020                0x30d00, 0x30d00,
1021                0x30d08, 0x30d14,
1022                0x30d1c, 0x30d20,
1023                0x30d3c, 0x30d50,
1024                0x31200, 0x3120c,
1025                0x31220, 0x31220,
1026                0x31240, 0x31240,
1027                0x31600, 0x3160c,
1028                0x31a00, 0x31a1c,
1029                0x31e00, 0x31e20,
1030                0x31e38, 0x31e3c,
1031                0x31e80, 0x31e80,
1032                0x31e88, 0x31ea8,
1033                0x31eb0, 0x31eb4,
1034                0x31ec8, 0x31ed4,
1035                0x31fb8, 0x32004,
1036                0x32200, 0x32200,
1037                0x32208, 0x32240,
1038                0x32248, 0x32280,
1039                0x32288, 0x322c0,
1040                0x322c8, 0x322fc,
1041                0x32600, 0x32630,
1042                0x32a00, 0x32abc,
1043                0x32b00, 0x32b70,
1044                0x33000, 0x33048,
1045                0x33060, 0x3309c,
1046                0x330f0, 0x33148,
1047                0x33160, 0x3319c,
1048                0x331f0, 0x332e4,
1049                0x332f8, 0x333e4,
1050                0x333f8, 0x33448,
1051                0x33460, 0x3349c,
1052                0x334f0, 0x33548,
1053                0x33560, 0x3359c,
1054                0x335f0, 0x336e4,
1055                0x336f8, 0x337e4,
1056                0x337f8, 0x337fc,
1057                0x33814, 0x33814,
1058                0x3382c, 0x3382c,
1059                0x33880, 0x3388c,
1060                0x338e8, 0x338ec,
1061                0x33900, 0x33948,
1062                0x33960, 0x3399c,
1063                0x339f0, 0x33ae4,
1064                0x33af8, 0x33b10,
1065                0x33b28, 0x33b28,
1066                0x33b3c, 0x33b50,
1067                0x33bf0, 0x33c10,
1068                0x33c28, 0x33c28,
1069                0x33c3c, 0x33c50,
1070                0x33cf0, 0x33cfc,
1071                0x34000, 0x34030,
1072                0x34100, 0x34144,
1073                0x34190, 0x341d0,
1074                0x34200, 0x34318,
1075                0x34400, 0x3452c,
1076                0x34540, 0x3461c,
1077                0x34800, 0x34834,
1078                0x348c0, 0x34908,
1079                0x34910, 0x349ac,
1080                0x34a00, 0x34a2c,
1081                0x34a44, 0x34a50,
1082                0x34a74, 0x34c24,
1083                0x34d00, 0x34d00,
1084                0x34d08, 0x34d14,
1085                0x34d1c, 0x34d20,
1086                0x34d3c, 0x34d50,
1087                0x35200, 0x3520c,
1088                0x35220, 0x35220,
1089                0x35240, 0x35240,
1090                0x35600, 0x3560c,
1091                0x35a00, 0x35a1c,
1092                0x35e00, 0x35e20,
1093                0x35e38, 0x35e3c,
1094                0x35e80, 0x35e80,
1095                0x35e88, 0x35ea8,
1096                0x35eb0, 0x35eb4,
1097                0x35ec8, 0x35ed4,
1098                0x35fb8, 0x36004,
1099                0x36200, 0x36200,
1100                0x36208, 0x36240,
1101                0x36248, 0x36280,
1102                0x36288, 0x362c0,
1103                0x362c8, 0x362fc,
1104                0x36600, 0x36630,
1105                0x36a00, 0x36abc,
1106                0x36b00, 0x36b70,
1107                0x37000, 0x37048,
1108                0x37060, 0x3709c,
1109                0x370f0, 0x37148,
1110                0x37160, 0x3719c,
1111                0x371f0, 0x372e4,
1112                0x372f8, 0x373e4,
1113                0x373f8, 0x37448,
1114                0x37460, 0x3749c,
1115                0x374f0, 0x37548,
1116                0x37560, 0x3759c,
1117                0x375f0, 0x376e4,
1118                0x376f8, 0x377e4,
1119                0x377f8, 0x377fc,
1120                0x37814, 0x37814,
1121                0x3782c, 0x3782c,
1122                0x37880, 0x3788c,
1123                0x378e8, 0x378ec,
1124                0x37900, 0x37948,
1125                0x37960, 0x3799c,
1126                0x379f0, 0x37ae4,
1127                0x37af8, 0x37b10,
1128                0x37b28, 0x37b28,
1129                0x37b3c, 0x37b50,
1130                0x37bf0, 0x37c10,
1131                0x37c28, 0x37c28,
1132                0x37c3c, 0x37c50,
1133                0x37cf0, 0x37cfc,
1134                0x38000, 0x38030,
1135                0x38100, 0x38144,
1136                0x38190, 0x381d0,
1137                0x38200, 0x38318,
1138                0x38400, 0x3852c,
1139                0x38540, 0x3861c,
1140                0x38800, 0x38834,
1141                0x388c0, 0x38908,
1142                0x38910, 0x389ac,
1143                0x38a00, 0x38a2c,
1144                0x38a44, 0x38a50,
1145                0x38a74, 0x38c24,
1146                0x38d00, 0x38d00,
1147                0x38d08, 0x38d14,
1148                0x38d1c, 0x38d20,
1149                0x38d3c, 0x38d50,
1150                0x39200, 0x3920c,
1151                0x39220, 0x39220,
1152                0x39240, 0x39240,
1153                0x39600, 0x3960c,
1154                0x39a00, 0x39a1c,
1155                0x39e00, 0x39e20,
1156                0x39e38, 0x39e3c,
1157                0x39e80, 0x39e80,
1158                0x39e88, 0x39ea8,
1159                0x39eb0, 0x39eb4,
1160                0x39ec8, 0x39ed4,
1161                0x39fb8, 0x3a004,
1162                0x3a200, 0x3a200,
1163                0x3a208, 0x3a240,
1164                0x3a248, 0x3a280,
1165                0x3a288, 0x3a2c0,
1166                0x3a2c8, 0x3a2fc,
1167                0x3a600, 0x3a630,
1168                0x3aa00, 0x3aabc,
1169                0x3ab00, 0x3ab70,
1170                0x3b000, 0x3b048,
1171                0x3b060, 0x3b09c,
1172                0x3b0f0, 0x3b148,
1173                0x3b160, 0x3b19c,
1174                0x3b1f0, 0x3b2e4,
1175                0x3b2f8, 0x3b3e4,
1176                0x3b3f8, 0x3b448,
1177                0x3b460, 0x3b49c,
1178                0x3b4f0, 0x3b548,
1179                0x3b560, 0x3b59c,
1180                0x3b5f0, 0x3b6e4,
1181                0x3b6f8, 0x3b7e4,
1182                0x3b7f8, 0x3b7fc,
1183                0x3b814, 0x3b814,
1184                0x3b82c, 0x3b82c,
1185                0x3b880, 0x3b88c,
1186                0x3b8e8, 0x3b8ec,
1187                0x3b900, 0x3b948,
1188                0x3b960, 0x3b99c,
1189                0x3b9f0, 0x3bae4,
1190                0x3baf8, 0x3bb10,
1191                0x3bb28, 0x3bb28,
1192                0x3bb3c, 0x3bb50,
1193                0x3bbf0, 0x3bc10,
1194                0x3bc28, 0x3bc28,
1195                0x3bc3c, 0x3bc50,
1196                0x3bcf0, 0x3bcfc,
1197                0x3c000, 0x3c030,
1198                0x3c100, 0x3c144,
1199                0x3c190, 0x3c1d0,
1200                0x3c200, 0x3c318,
1201                0x3c400, 0x3c52c,
1202                0x3c540, 0x3c61c,
1203                0x3c800, 0x3c834,
1204                0x3c8c0, 0x3c908,
1205                0x3c910, 0x3c9ac,
1206                0x3ca00, 0x3ca2c,
1207                0x3ca44, 0x3ca50,
1208                0x3ca74, 0x3cc24,
1209                0x3cd00, 0x3cd00,
1210                0x3cd08, 0x3cd14,
1211                0x3cd1c, 0x3cd20,
1212                0x3cd3c, 0x3cd50,
1213                0x3d200, 0x3d20c,
1214                0x3d220, 0x3d220,
1215                0x3d240, 0x3d240,
1216                0x3d600, 0x3d60c,
1217                0x3da00, 0x3da1c,
1218                0x3de00, 0x3de20,
1219                0x3de38, 0x3de3c,
1220                0x3de80, 0x3de80,
1221                0x3de88, 0x3dea8,
1222                0x3deb0, 0x3deb4,
1223                0x3dec8, 0x3ded4,
1224                0x3dfb8, 0x3e004,
1225                0x3e200, 0x3e200,
1226                0x3e208, 0x3e240,
1227                0x3e248, 0x3e280,
1228                0x3e288, 0x3e2c0,
1229                0x3e2c8, 0x3e2fc,
1230                0x3e600, 0x3e630,
1231                0x3ea00, 0x3eabc,
1232                0x3eb00, 0x3eb70,
1233                0x3f000, 0x3f048,
1234                0x3f060, 0x3f09c,
1235                0x3f0f0, 0x3f148,
1236                0x3f160, 0x3f19c,
1237                0x3f1f0, 0x3f2e4,
1238                0x3f2f8, 0x3f3e4,
1239                0x3f3f8, 0x3f448,
1240                0x3f460, 0x3f49c,
1241                0x3f4f0, 0x3f548,
1242                0x3f560, 0x3f59c,
1243                0x3f5f0, 0x3f6e4,
1244                0x3f6f8, 0x3f7e4,
1245                0x3f7f8, 0x3f7fc,
1246                0x3f814, 0x3f814,
1247                0x3f82c, 0x3f82c,
1248                0x3f880, 0x3f88c,
1249                0x3f8e8, 0x3f8ec,
1250                0x3f900, 0x3f948,
1251                0x3f960, 0x3f99c,
1252                0x3f9f0, 0x3fae4,
1253                0x3faf8, 0x3fb10,
1254                0x3fb28, 0x3fb28,
1255                0x3fb3c, 0x3fb50,
1256                0x3fbf0, 0x3fc10,
1257                0x3fc28, 0x3fc28,
1258                0x3fc3c, 0x3fc50,
1259                0x3fcf0, 0x3fcfc,
1260                0x40000, 0x4000c,
1261                0x40040, 0x40068,
1262                0x4007c, 0x40144,
1263                0x40180, 0x4018c,
1264                0x40200, 0x40298,
1265                0x402ac, 0x4033c,
1266                0x403f8, 0x403fc,
1267                0x41304, 0x413c4,
1268                0x41400, 0x4141c,
1269                0x41480, 0x414d0,
1270                0x44000, 0x44078,
1271                0x440c0, 0x44278,
1272                0x442c0, 0x44478,
1273                0x444c0, 0x44678,
1274                0x446c0, 0x44878,
1275                0x448c0, 0x449fc,
1276                0x45000, 0x45068,
1277                0x45080, 0x45084,
1278                0x450a0, 0x450b0,
1279                0x45200, 0x45268,
1280                0x45280, 0x45284,
1281                0x452a0, 0x452b0,
1282                0x460c0, 0x460e4,
1283                0x47000, 0x4708c,
1284                0x47200, 0x47250,
1285                0x47400, 0x47420,
1286                0x47600, 0x47618,
1287                0x47800, 0x47814,
1288                0x48000, 0x4800c,
1289                0x48040, 0x48068,
1290                0x4807c, 0x48144,
1291                0x48180, 0x4818c,
1292                0x48200, 0x48298,
1293                0x482ac, 0x4833c,
1294                0x483f8, 0x483fc,
1295                0x49304, 0x493c4,
1296                0x49400, 0x4941c,
1297                0x49480, 0x494d0,
1298                0x4c000, 0x4c078,
1299                0x4c0c0, 0x4c278,
1300                0x4c2c0, 0x4c478,
1301                0x4c4c0, 0x4c678,
1302                0x4c6c0, 0x4c878,
1303                0x4c8c0, 0x4c9fc,
1304                0x4d000, 0x4d068,
1305                0x4d080, 0x4d084,
1306                0x4d0a0, 0x4d0b0,
1307                0x4d200, 0x4d268,
1308                0x4d280, 0x4d284,
1309                0x4d2a0, 0x4d2b0,
1310                0x4e0c0, 0x4e0e4,
1311                0x4f000, 0x4f08c,
1312                0x4f200, 0x4f250,
1313                0x4f400, 0x4f420,
1314                0x4f600, 0x4f618,
1315                0x4f800, 0x4f814,
1316                0x50000, 0x500cc,
1317                0x50400, 0x50400,
1318                0x50800, 0x508cc,
1319                0x50c00, 0x50c00,
1320                0x51000, 0x5101c,
1321                0x51300, 0x51308,
1322        };
1323
1324        static const unsigned int t6_reg_ranges[] = {
1325                0x1008, 0x114c,
1326                0x1180, 0x11b4,
1327                0x11fc, 0x1250,
1328                0x1280, 0x133c,
1329                0x1800, 0x18fc,
1330                0x3000, 0x302c,
1331                0x3060, 0x30d8,
1332                0x30e0, 0x30fc,
1333                0x3140, 0x357c,
1334                0x35a8, 0x35cc,
1335                0x35ec, 0x35ec,
1336                0x3600, 0x5624,
1337                0x56cc, 0x575c,
1338                0x580c, 0x5814,
1339                0x5890, 0x58bc,
1340                0x5940, 0x595c,
1341                0x5980, 0x598c,
1342                0x59b0, 0x59dc,
1343                0x59fc, 0x5a18,
1344                0x5a60, 0x5a6c,
1345                0x5a80, 0x5a9c,
1346                0x5b94, 0x5bfc,
1347                0x5c10, 0x5ec0,
1348                0x5ec8, 0x5ec8,
1349                0x6000, 0x6040,
1350                0x6058, 0x6154,
1351                0x7700, 0x7798,
1352                0x77c0, 0x7880,
1353                0x78cc, 0x78fc,
1354                0x7b00, 0x7c54,
1355                0x7d00, 0x7efc,
1356                0x8dc0, 0x8de0,
1357                0x8df8, 0x8e84,
1358                0x8ea0, 0x8f88,
1359                0x8fb8, 0x911c,
1360                0x9400, 0x9470,
1361                0x9600, 0x971c,
1362                0x9800, 0x9808,
1363                0x9820, 0x983c,
1364                0x9850, 0x9864,
1365                0x9c00, 0x9c6c,
1366                0x9c80, 0x9cec,
1367                0x9d00, 0x9d6c,
1368                0x9d80, 0x9dec,
1369                0x9e00, 0x9e6c,
1370                0x9e80, 0x9eec,
1371                0x9f00, 0x9f6c,
1372                0x9f80, 0xa020,
1373                0xd004, 0xd03c,
1374                0xdfc0, 0xdfe0,
1375                0xe000, 0xf008,
1376                0x11000, 0x11014,
1377                0x11048, 0x11110,
1378                0x11118, 0x1117c,
1379                0x11190, 0x11260,
1380                0x11300, 0x1130c,
1381                0x12000, 0x1205c,
1382                0x19040, 0x1906c,
1383                0x19078, 0x19080,
1384                0x1908c, 0x19124,
1385                0x19150, 0x191b0,
1386                0x191d0, 0x191e8,
1387                0x19238, 0x192b8,
1388                0x193f8, 0x19474,
1389                0x19490, 0x194cc,
1390                0x194f0, 0x194f8,
1391                0x19c00, 0x19c80,
1392                0x19c94, 0x19cbc,
1393                0x19ce4, 0x19d28,
1394                0x19d50, 0x19d78,
1395                0x19d94, 0x19dc8,
1396                0x19df0, 0x19e10,
1397                0x19e50, 0x19e6c,
1398                0x19ea0, 0x19f34,
1399                0x19f40, 0x19f50,
1400                0x19f90, 0x19fac,
1401                0x19fc4, 0x19fe4,
1402                0x1a000, 0x1a06c,
1403                0x1a0b0, 0x1a120,
1404                0x1a128, 0x1a138,
1405                0x1a190, 0x1a1c4,
1406                0x1a1fc, 0x1a1fc,
1407                0x1e008, 0x1e00c,
1408                0x1e040, 0x1e04c,
1409                0x1e284, 0x1e290,
1410                0x1e2c0, 0x1e2c0,
1411                0x1e2e0, 0x1e2e0,
1412                0x1e300, 0x1e384,
1413                0x1e3c0, 0x1e3c8,
1414                0x1e408, 0x1e40c,
1415                0x1e440, 0x1e44c,
1416                0x1e684, 0x1e690,
1417                0x1e6c0, 0x1e6c0,
1418                0x1e6e0, 0x1e6e0,
1419                0x1e700, 0x1e784,
1420                0x1e7c0, 0x1e7c8,
1421                0x1e808, 0x1e80c,
1422                0x1e840, 0x1e84c,
1423                0x1ea84, 0x1ea90,
1424                0x1eac0, 0x1eac0,
1425                0x1eae0, 0x1eae0,
1426                0x1eb00, 0x1eb84,
1427                0x1ebc0, 0x1ebc8,
1428                0x1ec08, 0x1ec0c,
1429                0x1ec40, 0x1ec4c,
1430                0x1ee84, 0x1ee90,
1431                0x1eec0, 0x1eec0,
1432                0x1eee0, 0x1eee0,
1433                0x1ef00, 0x1ef84,
1434                0x1efc0, 0x1efc8,
1435                0x1f008, 0x1f00c,
1436                0x1f040, 0x1f04c,
1437                0x1f284, 0x1f290,
1438                0x1f2c0, 0x1f2c0,
1439                0x1f2e0, 0x1f2e0,
1440                0x1f300, 0x1f384,
1441                0x1f3c0, 0x1f3c8,
1442                0x1f408, 0x1f40c,
1443                0x1f440, 0x1f44c,
1444                0x1f684, 0x1f690,
1445                0x1f6c0, 0x1f6c0,
1446                0x1f6e0, 0x1f6e0,
1447                0x1f700, 0x1f784,
1448                0x1f7c0, 0x1f7c8,
1449                0x1f808, 0x1f80c,
1450                0x1f840, 0x1f84c,
1451                0x1fa84, 0x1fa90,
1452                0x1fac0, 0x1fac0,
1453                0x1fae0, 0x1fae0,
1454                0x1fb00, 0x1fb84,
1455                0x1fbc0, 0x1fbc8,
1456                0x1fc08, 0x1fc0c,
1457                0x1fc40, 0x1fc4c,
1458                0x1fe84, 0x1fe90,
1459                0x1fec0, 0x1fec0,
1460                0x1fee0, 0x1fee0,
1461                0x1ff00, 0x1ff84,
1462                0x1ffc0, 0x1ffc8,
1463                0x30000, 0x30070,
1464                0x30100, 0x3015c,
1465                0x30190, 0x301d0,
1466                0x30200, 0x30318,
1467                0x30400, 0x3052c,
1468                0x30540, 0x3061c,
1469                0x30800, 0x3088c,
1470                0x308c0, 0x30908,
1471                0x30910, 0x309b8,
1472                0x30a00, 0x30a04,
1473                0x30a0c, 0x30a2c,
1474                0x30a44, 0x30a50,
1475                0x30a74, 0x30c24,
1476                0x30d00, 0x30d3c,
1477                0x30d44, 0x30d7c,
1478                0x30de0, 0x30de0,
1479                0x30e00, 0x30ed4,
1480                0x30f00, 0x30fa4,
1481                0x30fc0, 0x30fc4,
1482                0x31000, 0x31004,
1483                0x31080, 0x310fc,
1484                0x31208, 0x31220,
1485                0x3123c, 0x31254,
1486                0x31300, 0x31300,
1487                0x31308, 0x3131c,
1488                0x31338, 0x3133c,
1489                0x31380, 0x31380,
1490                0x31388, 0x313a8,
1491                0x313b4, 0x313b4,
1492                0x31400, 0x31420,
1493                0x31438, 0x3143c,
1494                0x31480, 0x31480,
1495                0x314a8, 0x314a8,
1496                0x314b0, 0x314b4,
1497                0x314c8, 0x314d4,
1498                0x31a40, 0x31a4c,
1499                0x31af0, 0x31b20,
1500                0x31b38, 0x31b3c,
1501                0x31b80, 0x31b80,
1502                0x31ba8, 0x31ba8,
1503                0x31bb0, 0x31bb4,
1504                0x31bc8, 0x31bd4,
1505                0x32140, 0x3218c,
1506                0x321f0, 0x32200,
1507                0x32218, 0x32218,
1508                0x32400, 0x32400,
1509                0x32408, 0x3241c,
1510                0x32618, 0x32620,
1511                0x32664, 0x32664,
1512                0x326a8, 0x326a8,
1513                0x326ec, 0x326ec,
1514                0x32a00, 0x32abc,
1515                0x32b00, 0x32b78,
1516                0x32c00, 0x32c00,
1517                0x32c08, 0x32c3c,
1518                0x32e00, 0x32e2c,
1519                0x32f00, 0x32f2c,
1520                0x33000, 0x330ac,
1521                0x330c0, 0x331ac,
1522                0x331c0, 0x332c4,
1523                0x332e4, 0x333c4,
1524                0x333e4, 0x334ac,
1525                0x334c0, 0x335ac,
1526                0x335c0, 0x336c4,
1527                0x336e4, 0x337c4,
1528                0x337e4, 0x337fc,
1529                0x33814, 0x33814,
1530                0x33854, 0x33868,
1531                0x33880, 0x3388c,
1532                0x338c0, 0x338d0,
1533                0x338e8, 0x338ec,
1534                0x33900, 0x339ac,
1535                0x339c0, 0x33ac4,
1536                0x33ae4, 0x33b10,
1537                0x33b24, 0x33b50,
1538                0x33bf0, 0x33c10,
1539                0x33c24, 0x33c50,
1540                0x33cf0, 0x33cfc,
1541                0x34000, 0x34070,
1542                0x34100, 0x3415c,
1543                0x34190, 0x341d0,
1544                0x34200, 0x34318,
1545                0x34400, 0x3452c,
1546                0x34540, 0x3461c,
1547                0x34800, 0x3488c,
1548                0x348c0, 0x34908,
1549                0x34910, 0x349b8,
1550                0x34a00, 0x34a04,
1551                0x34a0c, 0x34a2c,
1552                0x34a44, 0x34a50,
1553                0x34a74, 0x34c24,
1554                0x34d00, 0x34d3c,
1555                0x34d44, 0x34d7c,
1556                0x34de0, 0x34de0,
1557                0x34e00, 0x34ed4,
1558                0x34f00, 0x34fa4,
1559                0x34fc0, 0x34fc4,
1560                0x35000, 0x35004,
1561                0x35080, 0x350fc,
1562                0x35208, 0x35220,
1563                0x3523c, 0x35254,
1564                0x35300, 0x35300,
1565                0x35308, 0x3531c,
1566                0x35338, 0x3533c,
1567                0x35380, 0x35380,
1568                0x35388, 0x353a8,
1569                0x353b4, 0x353b4,
1570                0x35400, 0x35420,
1571                0x35438, 0x3543c,
1572                0x35480, 0x35480,
1573                0x354a8, 0x354a8,
1574                0x354b0, 0x354b4,
1575                0x354c8, 0x354d4,
1576                0x35a40, 0x35a4c,
1577                0x35af0, 0x35b20,
1578                0x35b38, 0x35b3c,
1579                0x35b80, 0x35b80,
1580                0x35ba8, 0x35ba8,
1581                0x35bb0, 0x35bb4,
1582                0x35bc8, 0x35bd4,
1583                0x36140, 0x3618c,
1584                0x361f0, 0x36200,
1585                0x36218, 0x36218,
1586                0x36400, 0x36400,
1587                0x36408, 0x3641c,
1588                0x36618, 0x36620,
1589                0x36664, 0x36664,
1590                0x366a8, 0x366a8,
1591                0x366ec, 0x366ec,
1592                0x36a00, 0x36abc,
1593                0x36b00, 0x36b78,
1594                0x36c00, 0x36c00,
1595                0x36c08, 0x36c3c,
1596                0x36e00, 0x36e2c,
1597                0x36f00, 0x36f2c,
1598                0x37000, 0x370ac,
1599                0x370c0, 0x371ac,
1600                0x371c0, 0x372c4,
1601                0x372e4, 0x373c4,
1602                0x373e4, 0x374ac,
1603                0x374c0, 0x375ac,
1604                0x375c0, 0x376c4,
1605                0x376e4, 0x377c4,
1606                0x377e4, 0x377fc,
1607                0x37814, 0x37814,
1608                0x37854, 0x37868,
1609                0x37880, 0x3788c,
1610                0x378c0, 0x378d0,
1611                0x378e8, 0x378ec,
1612                0x37900, 0x379ac,
1613                0x379c0, 0x37ac4,
1614                0x37ae4, 0x37b10,
1615                0x37b24, 0x37b50,
1616                0x37bf0, 0x37c10,
1617                0x37c24, 0x37c50,
1618                0x37cf0, 0x37cfc,
1619                0x40040, 0x40040,
1620                0x40080, 0x40084,
1621                0x40100, 0x40100,
1622                0x40140, 0x401bc,
1623                0x40200, 0x40214,
1624                0x40228, 0x40228,
1625                0x40240, 0x40258,
1626                0x40280, 0x40280,
1627                0x40304, 0x40304,
1628                0x40330, 0x4033c,
1629                0x41304, 0x413dc,
1630                0x41400, 0x4141c,
1631                0x41480, 0x414d0,
1632                0x44000, 0x4407c,
1633                0x440c0, 0x4427c,
1634                0x442c0, 0x4447c,
1635                0x444c0, 0x4467c,
1636                0x446c0, 0x4487c,
1637                0x448c0, 0x44a7c,
1638                0x44ac0, 0x44c7c,
1639                0x44cc0, 0x44e7c,
1640                0x44ec0, 0x4507c,
1641                0x450c0, 0x451fc,
1642                0x45800, 0x45868,
1643                0x45880, 0x45884,
1644                0x458a0, 0x458b0,
1645                0x45a00, 0x45a68,
1646                0x45a80, 0x45a84,
1647                0x45aa0, 0x45ab0,
1648                0x460c0, 0x460e4,
1649                0x47000, 0x4708c,
1650                0x47200, 0x47250,
1651                0x47400, 0x47420,
1652                0x47600, 0x47618,
1653                0x47800, 0x4782c,
1654                0x50000, 0x500cc,
1655                0x50400, 0x50400,
1656                0x50800, 0x508cc,
1657                0x50c00, 0x50c00,
1658                0x51000, 0x510b0,
1659                0x51300, 0x51324,
1660        };
1661
1662        u32 *buf_end = (u32 *)((char *)buf + buf_size);
1663        const unsigned int *reg_ranges;
1664        int reg_ranges_size, range;
1665        unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1666
1667        /* Select the right set of register ranges to dump depending on the
1668         * adapter chip type.
1669         */
1670        switch (chip_version) {
1671        case CHELSIO_T4:
1672                reg_ranges = t4_reg_ranges;
1673                reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
1674                break;
1675
1676        case CHELSIO_T5:
1677                reg_ranges = t5_reg_ranges;
1678                reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1679                break;
1680
1681        case CHELSIO_T6:
1682                reg_ranges = t6_reg_ranges;
1683                reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
1684                break;
1685
1686        default:
1687                dev_err(adap->pdev_dev,
1688                        "Unsupported chip version %d\n", chip_version);
1689                return;
1690        }
1691
1692        /* Clear the register buffer and insert the appropriate register
1693         * values selected by the above register ranges.
1694         */
1695        memset(buf, 0, buf_size);
1696        for (range = 0; range < reg_ranges_size; range += 2) {
1697                unsigned int reg = reg_ranges[range];
1698                unsigned int last_reg = reg_ranges[range + 1];
1699                u32 *bufp = (u32 *)((char *)buf + reg);
1700
1701                /* Iterate across the register range filling in the register
1702                 * buffer but don't write past the end of the register buffer.
1703                 */
1704                while (reg <= last_reg && bufp < buf_end) {
1705                        *bufp++ = t4_read_reg(adap, reg);
1706                        reg += sizeof(u32);
1707                }
1708        }
1709}
1710
1711#define EEPROM_STAT_ADDR   0x7bfc
1712#define VPD_BASE           0x400
1713#define VPD_BASE_OLD       0
1714#define VPD_LEN            1024
1715#define CHELSIO_VPD_UNIQUE_ID 0x82
1716
1717/**
1718 *      t4_seeprom_wp - enable/disable EEPROM write protection
1719 *      @adapter: the adapter
1720 *      @enable: whether to enable or disable write protection
1721 *
1722 *      Enables or disables write protection on the serial EEPROM.
1723 */
1724int t4_seeprom_wp(struct adapter *adapter, bool enable)
1725{
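            /* Writing 0xc to the EEPROM status word at EEPROM_STAT_ADDR turns
             * write protection on (presumably by setting the serial EEPROM's
             * block-protect bits); writing 0 turns it back off.
             */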
1726        unsigned int v = enable ? 0xc : 0;
1727        int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
1728        return ret < 0 ? ret : 0;
1729}
1730
1731/**
1732 *      t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
1733 *      @adapter: adapter to read
1734 *      @p: where to store the parameters
1735 *
1736 *      Reads card parameters stored in VPD EEPROM.
1737 */
1738int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
1739{
1740        int i, ret = 0, addr;
1741        int ec, sn, pn, na;
1742        u8 *vpd, csum;
1743        unsigned int vpdr_len, kw_offset, id_len;
1744
1745        vpd = vmalloc(VPD_LEN);
1746        if (!vpd)
1747                return -ENOMEM;
1748
1749        /* Card information normally starts at VPD_BASE but early cards had
1750         * it at 0.
1751         */
1752        ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
1753        if (ret < 0)
1754                goto out;
1755
1756        /* The VPD shall have a unique identifier specified by the PCI SIG.
1757         * For Chelsio adapters that identifier is CHELSIO_VPD_UNIQUE_ID
1758         * (0x82), so the first byte of a valid VPD is 0x82.  The VPD
1759         * programming software is expected to automatically put this entry
1760         * at the beginning of the VPD.
1761         */
1762        addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
1763
1764        ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
1765        if (ret < 0)
1766                goto out;
1767
1768        if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
1769                dev_err(adapter->pdev_dev, "missing VPD ID string\n");
1770                ret = -EINVAL;
1771                goto out;
1772        }
1773
1774        id_len = pci_vpd_lrdt_size(vpd);
1775        if (id_len > ID_LEN)
1776                id_len = ID_LEN;
1777
1778        i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
1779        if (i < 0) {
1780                dev_err(adapter->pdev_dev, "missing VPD-R section\n");
1781                ret = -EINVAL;
1782                goto out;
1783        }
1784
1785        vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
1786        kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
1787        if (vpdr_len + kw_offset > VPD_LEN) {
1788                dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
1789                ret = -EINVAL;
1790                goto out;
1791        }
1792
1793#define FIND_VPD_KW(var, name) do { \
1794        var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
1795        if (var < 0) { \
1796                dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
1797                ret = -EINVAL; \
1798                goto out; \
1799        } \
1800        var += PCI_VPD_INFO_FLD_HDR_SIZE; \
1801} while (0)
1802
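            /* The first data byte of the "RV" keyword is a checksum chosen so
             * that all VPD bytes from offset 0 up to and including that byte
             * sum to zero, which is what the loop below verifies.
             */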
1803        FIND_VPD_KW(i, "RV");
1804        for (csum = 0; i >= 0; i--)
1805                csum += vpd[i];
1806
1807        if (csum) {
1808                dev_err(adapter->pdev_dev,
1809                        "corrupted VPD EEPROM, actual csum %u\n", csum);
1810                ret = -EINVAL;
1811                goto out;
1812        }
1813
1814        FIND_VPD_KW(ec, "EC");
1815        FIND_VPD_KW(sn, "SN");
1816        FIND_VPD_KW(pn, "PN");
1817        FIND_VPD_KW(na, "NA");
1818#undef FIND_VPD_KW
1819
1820        memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
1821        strim(p->id);
1822        memcpy(p->ec, vpd + ec, EC_LEN);
1823        strim(p->ec);
1824        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
1825        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
1826        strim(p->sn);
1827        i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
1828        memcpy(p->pn, vpd + pn, min(i, PN_LEN));
1829        strim(p->pn);
            i = pci_vpd_info_field_size(vpd + na - PCI_VPD_INFO_FLD_HDR_SIZE);
1830        memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
1831        strim((char *)p->na);
1832
1833out:
1834        vfree(vpd);
1835        return ret;
1836}
1837
1838/**
1839 *      t4_get_vpd_params - read VPD parameters & retrieve Core Clock
1840 *      @adapter: adapter to read
1841 *      @p: where to store the parameters
1842 *
1843 *      Reads card parameters stored in VPD EEPROM and retrieves the Core
1844 *      Clock.  This can only be called after a connection to the firmware
1845 *      is established.
1846 */
1847int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
1848{
1849        u32 cclk_param, cclk_val;
1850        int ret;
1851
1852        /* Grab the raw VPD parameters.
1853         */
1854        ret = t4_get_raw_vpd_params(adapter, p);
1855        if (ret)
1856                return ret;
1857
1858        /* Ask firmware for the Core Clock since it knows how to translate the
1859         * Reference Clock ('V2') VPD field into a Core Clock value ...
1860         */
1861        cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1862                      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
1863        ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1864                              1, &cclk_param, &cclk_val);
1865
1866        if (ret)
1867                return ret;
1868        p->cclk = cclk_val;
1869
1870        return 0;
1871}
1872
1873/* serial flash and firmware constants */
1874enum {
1875        SF_ATTEMPTS = 10,             /* max retries for SF operations */
1876
1877        /* flash command opcodes */
1878        SF_PROG_PAGE    = 2,          /* program page */
1879        SF_WR_DISABLE   = 4,          /* disable writes */
1880        SF_RD_STATUS    = 5,          /* read status register */
1881        SF_WR_ENABLE    = 6,          /* enable writes */
1882        SF_RD_DATA_FAST = 0xb,        /* read flash */
1883        SF_RD_ID        = 0x9f,       /* read ID */
1884        SF_ERASE_SECTOR = 0xd8,       /* erase sector */
1885
1886        FW_MAX_SIZE = 16 * SF_SEC_SIZE,
1887};
1888
1889/**
1890 *      sf1_read - read data from the serial flash
1891 *      @adapter: the adapter
1892 *      @byte_cnt: number of bytes to read
1893 *      @cont: whether another operation will be chained
1894 *      @lock: whether to lock SF for PL access only
1895 *      @valp: where to store the read data
1896 *
1897 *      Reads up to 4 bytes of data from the serial flash.  The location of
1898 *      the read needs to be specified prior to calling this by issuing the
1899 *      appropriate commands to the serial flash.
1900 */
1901static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
1902                    int lock, u32 *valp)
1903{
1904        int ret;
1905
1906        if (!byte_cnt || byte_cnt > 4)
1907                return -EINVAL;
1908        if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1909                return -EBUSY;
1910        t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1911                     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
1912        ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1913        if (!ret)
1914                *valp = t4_read_reg(adapter, SF_DATA_A);
1915        return ret;
1916}
1917
1918/**
1919 *      sf1_write - write data to the serial flash
1920 *      @adapter: the adapter
1921 *      @byte_cnt: number of bytes to write
1922 *      @cont: whether another operation will be chained
1923 *      @lock: whether to lock SF for PL access only
1924 *      @val: value to write
1925 *
1926 *      Writes up to 4 bytes of data to the serial flash.  The location of
1927 *      the write needs to be specified prior to calling this by issuing the
1928 *      appropriate commands to the serial flash.
1929 */
1930static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
1931                     int lock, u32 val)
1932{
1933        if (!byte_cnt || byte_cnt > 4)
1934                return -EINVAL;
1935        if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1936                return -EBUSY;
1937        t4_write_reg(adapter, SF_DATA_A, val);
1938        t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1939                     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
1940        return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1941}
1942
1943/**
1944 *      flash_wait_op - wait for a flash operation to complete
1945 *      @adapter: the adapter
1946 *      @attempts: max number of polls of the status register
1947 *      @delay: delay between polls in ms
1948 *
1949 *      Wait for a flash operation to complete by polling the status register.
1950 */
1951static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
1952{
1953        int ret;
1954        u32 status;
1955
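            /* Bit 0 of the flash status register is the Write-In-Progress
             * flag; the operation is complete once it reads back as zero.
             */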
1956        while (1) {
1957                if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
1958                    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
1959                        return ret;
1960                if (!(status & 1))
1961                        return 0;
1962                if (--attempts == 0)
1963                        return -EAGAIN;
1964                if (delay)
1965                        msleep(delay);
1966        }
1967}
1968
1969/**
1970 *      t4_read_flash - read words from serial flash
1971 *      @adapter: the adapter
1972 *      @addr: the start address for the read
1973 *      @nwords: how many 32-bit words to read
1974 *      @data: where to store the read data
1975 *      @byte_oriented: whether to store data as bytes or as words
1976 *
1977 *      Read the specified number of 32-bit words from the serial flash.
1978 *      If @byte_oriented is set the read data is stored as a byte array
1979 *      (i.e., big-endian), otherwise as 32-bit words in the platform's
1980 *      natural endianness.
1981 */
1982int t4_read_flash(struct adapter *adapter, unsigned int addr,
1983                  unsigned int nwords, u32 *data, int byte_oriented)
1984{
1985        int ret;
1986
1987        if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
1988                return -EINVAL;
1989
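            /* Form the 4-byte fast-read command: byte-swap the 24-bit address
             * and merge the SF_RD_DATA_FAST opcode into the low byte.
             */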
1990        addr = swab32(addr) | SF_RD_DATA_FAST;
1991
1992        if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
1993            (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
1994                return ret;
1995
1996        for ( ; nwords; nwords--, data++) {
1997                ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
1998                if (nwords == 1)
1999                        t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2000                if (ret)
2001                        return ret;
2002                if (byte_oriented)
2003                        *data = (__force __u32)(cpu_to_be32(*data));
2004        }
2005        return 0;
2006}
2007
2008/**
2009 *      t4_write_flash - write up to a page of data to the serial flash
2010 *      @adapter: the adapter
2011 *      @addr: the start address to write
2012 *      @n: length of data to write in bytes
2013 *      @data: the data to write
2014 *
2015 *      Writes up to a page of data (256 bytes) to the serial flash starting
2016 *      at the given address.  All the data must be written to the same page.
2017 */
2018static int t4_write_flash(struct adapter *adapter, unsigned int addr,
2019                          unsigned int n, const u8 *data)
2020{
2021        int ret;
2022        u32 buf[64];
2023        unsigned int i, c, left, val, offset = addr & 0xff;
2024
2025        if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
2026                return -EINVAL;
2027
2028        val = swab32(addr) | SF_PROG_PAGE;
2029
2030        if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2031            (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
2032                goto unlock;
2033
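            /* Stream the data to the flash 4 bytes at a time, keeping SF_CONT
             * set on every chunk except the final one.
             */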
2034        for (left = n; left; left -= c) {
2035                c = min(left, 4U);
2036                for (val = 0, i = 0; i < c; ++i)
2037                        val = (val << 8) + *data++;
2038
2039                ret = sf1_write(adapter, c, c != left, 1, val);
2040                if (ret)
2041                        goto unlock;
2042        }
2043        ret = flash_wait_op(adapter, 8, 1);
2044        if (ret)
2045                goto unlock;
2046
2047        t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2048
2049        /* Read the page to verify the write succeeded */
2050        ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
2051        if (ret)
2052                return ret;
2053
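            /* @data was advanced past the written bytes by the loop above, so
             * back up by @n to compare against what we intended to write.
             */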
2054        if (memcmp(data - n, (u8 *)buf + offset, n)) {
2055                dev_err(adapter->pdev_dev,
2056                        "failed to correctly write the flash page at %#x\n",
2057                        addr);
2058                return -EIO;
2059        }
2060        return 0;
2061
2062unlock:
2063        t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2064        return ret;
2065}
2066
2067/**
2068 *      t4_get_fw_version - read the firmware version
2069 *      @adapter: the adapter
2070 *      @vers: where to place the version
2071 *
2072 *      Reads the FW version from flash.
2073 */
2074int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2075{
2076        return t4_read_flash(adapter, FLASH_FW_START +
2077                             offsetof(struct fw_hdr, fw_ver), 1,
2078                             vers, 0);
2079}
2080
2081/**
2082 *      t4_get_tp_version - read the TP microcode version
2083 *      @adapter: the adapter
2084 *      @vers: where to place the version
2085 *
2086 *      Reads the TP microcode version from flash.
2087 */
2088int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2089{
2090        return t4_read_flash(adapter, FLASH_FW_START +
2091                             offsetof(struct fw_hdr, tp_microcode_ver),
2092                             1, vers, 0);
2093}
2094
2095/**
2096 *      t4_get_exprom_version - return the Expansion ROM version (if any)
2097 *      @adapter: the adapter
2098 *      @vers: where to place the version
2099 *
2100 *      Reads the Expansion ROM header from FLASH and returns the version
2101 *      number (if present) through the @vers return value pointer.  We return
2102 *      this in the Firmware Version Format since it's convenient.  Return
2103 *      0 on success, -ENOENT if no Expansion ROM is present.
2104 */
2105int t4_get_exprom_version(struct adapter *adap, u32 *vers)
2106{
2107        struct exprom_header {
2108                unsigned char hdr_arr[16];      /* must start with 0x55aa */
2109                unsigned char hdr_ver[4];       /* Expansion ROM version */
2110        } *hdr;
2111        u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2112                                           sizeof(u32))];
2113        int ret;
2114
2115        ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
2116                            ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
2117                            0);
2118        if (ret)
2119                return ret;
2120
2121        hdr = (struct exprom_header *)exprom_header_buf;
2122        if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2123                return -ENOENT;
2124
2125        *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
2126                 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
2127                 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
2128                 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
2129        return 0;
2130}
2131
2132/* Is the given firmware API compatible with the one the driver was compiled
2133 * with?
2134 */
2135static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2136{
2137
2138        /* short circuit if it's the exact same firmware version */
2139        if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2140                return 1;
2141
2142#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2143        if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2144            SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
2145                return 1;
2146#undef SAME_INTF
2147
2148        return 0;
2149}
2150
2151/* The firmware in the filesystem is usable, but should it be installed?
2152 * This routine explains itself in detail if it indicates the filesystem
2153 * firmware should be installed.
2154 */
2155static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
2156                                int k, int c)
2157{
2158        const char *reason;
2159
2160        if (!card_fw_usable) {
2161                reason = "incompatible or unusable";
2162                goto install;
2163        }
2164
2165        if (k > c) {
2166                reason = "older than the version supported with this driver";
2167                goto install;
2168        }
2169
2170        return 0;
2171
2172install:
2173        dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
2174                "installing firmware %u.%u.%u.%u on card.\n",
2175                FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
2176                FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
2177                FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
2178                FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
2179
2180        return 1;
2181}
2182
2183int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
2184               const u8 *fw_data, unsigned int fw_size,
2185               struct fw_hdr *card_fw, enum dev_state state,
2186               int *reset)
2187{
2188        int ret, card_fw_usable, fs_fw_usable;
2189        const struct fw_hdr *fs_fw;
2190        const struct fw_hdr *drv_fw;
2191
2192        drv_fw = &fw_info->fw_hdr;
2193
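            /* Note that this routine reports failures as positive errno
             * values, which is why the results of t4_read_flash() and
             * t4_fw_upgrade() below are negated.
             */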
2194        /* Read the header of the firmware on the card */
2195        ret = -t4_read_flash(adap, FLASH_FW_START,
2196                            sizeof(*card_fw) / sizeof(uint32_t),
2197                            (uint32_t *)card_fw, 1);
2198        if (ret == 0) {
2199                card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2200        } else {
2201                dev_err(adap->pdev_dev,
2202                        "Unable to read card's firmware header: %d\n", ret);
2203                card_fw_usable = 0;
2204        }
2205
2206        if (fw_data != NULL) {
2207                fs_fw = (const void *)fw_data;
2208                fs_fw_usable = fw_compatible(drv_fw, fs_fw);
2209        } else {
2210                fs_fw = NULL;
2211                fs_fw_usable = 0;
2212        }
2213
2214        if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2215            (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
2216                /* Common case: the firmware on the card is an exact match and
2217                 * the filesystem one is an exact match too, or the filesystem
2218                 * one is absent/incompatible.
2219                 */
2220        } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
2221                   should_install_fs_fw(adap, card_fw_usable,
2222                                        be32_to_cpu(fs_fw->fw_ver),
2223                                        be32_to_cpu(card_fw->fw_ver))) {
2224                ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
2225                                     fw_size, 0);
2226                if (ret != 0) {
2227                        dev_err(adap->pdev_dev,
2228                                "failed to install firmware: %d\n", ret);
2229                        goto bye;
2230                }
2231
2232                /* Installed successfully, update the cached header too. */
2233                *card_fw = *fs_fw;
2234                card_fw_usable = 1;
2235                *reset = 0;     /* already reset as part of load_fw */
2236        }
2237
2238        if (!card_fw_usable) {
2239                uint32_t d, c, k;
2240
2241                d = be32_to_cpu(drv_fw->fw_ver);
2242                c = be32_to_cpu(card_fw->fw_ver);
2243                k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
2244
2245                dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
2246                        "chip state %d, "
2247                        "driver compiled with %d.%d.%d.%d, "
2248                        "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
2249                        state,
2250                        FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
2251                        FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
2252                        FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
2253                        FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
2254                        FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
2255                        FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
2256                ret = EINVAL;
2257                goto bye;
2258        }
2259
2260        /* We're using whatever's on the card and it's known to be good. */
2261        adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
2262        adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
2263
2264bye:
2265        return ret;
2266}
2267
2268/**
2269 *      t4_flash_erase_sectors - erase a range of flash sectors
2270 *      @adapter: the adapter
2271 *      @start: the first sector to erase
2272 *      @end: the last sector to erase
2273 *
2274 *      Erases the sectors in the given inclusive range.
2275 */
2276static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
2277{
2278        int ret = 0;
2279
2280        if (end >= adapter->params.sf_nsec)
2281                return -EINVAL;
2282
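            /* Each sector is erased with the usual serial flash sequence:
             * WRITE ENABLE, then ERASE SECTOR with the sector number folded
             * into the upper bytes of the command word, then poll the status
             * register until the erase completes.
             */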
2283        while (start <= end) {
2284                if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2285                    (ret = sf1_write(adapter, 4, 0, 1,
2286                                     SF_ERASE_SECTOR | (start << 8))) != 0 ||
2287                    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
2288                        dev_err(adapter->pdev_dev,
2289                                "erase of flash sector %d failed, error %d\n",
2290                                start, ret);
2291                        break;
2292                }
2293                start++;
2294        }
2295        t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2296        return ret;
2297}
2298
2299/**
2300 *      t4_flash_cfg_addr - return the address of the flash configuration file
2301 *      @adapter: the adapter
2302 *
2303 *      Return the address within the flash where the Firmware Configuration
2304 *      File is stored.
2305 */
2306unsigned int t4_flash_cfg_addr(struct adapter *adapter)
2307{
2308        if (adapter->params.sf_size == 0x100000)
2309                return FLASH_FPGA_CFG_START;
2310        else
2311                return FLASH_CFG_START;
2312}
2313
2314/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
2315 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
2316 * and emit an error message for mismatched firmware to save our caller the
2317 * effort ...
2318 */
2319static bool t4_fw_matches_chip(const struct adapter *adap,
2320                               const struct fw_hdr *hdr)
2321{
2322        /* The expression below will return FALSE for any unsupported adapter
2323         * which will keep us "honest" in the future ...
2324         */
2325        if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
2326            (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
2327            (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
2328                return true;
2329
2330        dev_err(adap->pdev_dev,
2331                "FW image (%d) is not suitable for this adapter (%d)\n",
2332                hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
2333        return false;
2334}
2335
2336/**
2337 *      t4_load_fw - download firmware
2338 *      @adap: the adapter
2339 *      @fw_data: the firmware image to write
2340 *      @size: image size
2341 *
2342 *      Write the supplied firmware image to the card's serial flash.
2343 */
2344int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
2345{
2346        u32 csum;
2347        int ret, addr;
2348        unsigned int i;
2349        u8 first_page[SF_PAGE_SIZE];
2350        const __be32 *p = (const __be32 *)fw_data;
2351        const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
2352        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
2353        unsigned int fw_img_start = adap->params.sf_fw_start;
2354        unsigned int fw_start_sec = fw_img_start / sf_sec_size;
2355
2356        if (!size) {
2357                dev_err(adap->pdev_dev, "FW image has no data\n");
2358                return -EINVAL;
2359        }
2360        if (size & 511) {
2361                dev_err(adap->pdev_dev,
2362                        "FW image size not multiple of 512 bytes\n");
2363                return -EINVAL;
2364        }
2365        if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
2366                dev_err(adap->pdev_dev,
2367                        "FW image size differs from size in FW header\n");
2368                return -EINVAL;
2369        }
2370        if (size > FW_MAX_SIZE) {
2371                dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
2372                        FW_MAX_SIZE);
2373                return -EFBIG;
2374        }
2375        if (!t4_fw_matches_chip(adap, hdr))
2376                return -EINVAL;
2377
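            /* The image carries a checksum such that its big-endian 32-bit
             * words sum to 0xffffffff; verify that before touching the flash.
             */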
2378        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
2379                csum += be32_to_cpu(p[i]);
2380
2381        if (csum != 0xffffffff) {
2382                dev_err(adap->pdev_dev,
2383                        "corrupted firmware image, checksum %#x\n", csum);
2384                return -EINVAL;
2385        }
2386
2387        i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
2388        ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
2389        if (ret)
2390                goto out;
2391
2392        /*
2393         * We write the correct version at the end so the driver can see a bad
2394         * version if the FW write fails.  Start by writing a copy of the
2395         * first page with a bad version.
2396         */
2397        memcpy(first_page, fw_data, SF_PAGE_SIZE);
2398        ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
2399        ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
2400        if (ret)
2401                goto out;
2402
2403        addr = fw_img_start;
2404        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
2405                addr += SF_PAGE_SIZE;
2406                fw_data += SF_PAGE_SIZE;
2407                ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
2408                if (ret)
2409                        goto out;
2410        }
2411
2412        ret = t4_write_flash(adap,
2413                             fw_img_start + offsetof(struct fw_hdr, fw_ver),
2414                             sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
2415out:
2416        if (ret)
2417                dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
2418                        ret);
2419        else
2420                ret = t4_get_fw_version(adap, &adap->params.fw_vers);
2421        return ret;
2422}
2423
2424/**
2425 *      t4_phy_fw_ver - return current PHY firmware version
2426 *      @adap: the adapter
2427 *      @phy_fw_ver: return value buffer for PHY firmware version
2428 *
2429 *      Returns the current version of external PHY firmware on the
2430 *      adapter.
2431 */
2432int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
2433{
2434        u32 param, val;
2435        int ret;
2436
2437        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2438                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2439                 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2440                 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
2441        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
2442                              &param, &val);
2443        if (ret < 0)
2444                return ret;
2445        *phy_fw_ver = val;
2446        return 0;
2447}
2448
2449/**
2450 *      t4_load_phy_fw - download port PHY firmware
2451 *      @adap: the adapter
2452 *      @win: the PCI-E Memory Window index to use for t4_memory_rw()
2453 *      @win_lock: the lock to use to guard the memory copy
2454 *      @phy_fw_version: function to check PHY firmware versions
2455 *      @phy_fw_data: the PHY firmware image to write
2456 *      @phy_fw_size: image size
2457 *
2458 *      Transfer the specified PHY firmware to the adapter.  If a non-NULL
2459 *      @phy_fw_version is supplied, then it will be used to determine if
2460 *      it's necessary to perform the transfer by comparing the version
2461 *      of any existing adapter PHY firmware with that of the passed in
2462 *      PHY firmware image.  If @win_lock is non-NULL then it will be used
2463 *      around the call to t4_memory_rw() which transfers the PHY firmware
2464 *      to the adapter.
2465 *
2466 *      A negative error number will be returned if an error occurs.  If
2467 *      version number support is available and there's no need to upgrade
2468 *      the firmware, 0 will be returned.  If firmware is successfully
2469 *      transferred to the adapter, 1 will be returned.
2470 *
2471 *      NOTE: some adapters only have local RAM to store the PHY firmware.  As
2472 *      a result, a RESET of the adapter would cause that RAM to lose its
2473 *      contents.  Thus, loading PHY firmware on such adapters must happen
2474 *      after any FW_RESET_CMDs ...
2475 */
2476int t4_load_phy_fw(struct adapter *adap,
2477                   int win, spinlock_t *win_lock,
2478                   int (*phy_fw_version)(const u8 *, size_t),
2479                   const u8 *phy_fw_data, size_t phy_fw_size)
2480{
2481        unsigned long mtype = 0, maddr = 0;
2482        u32 param, val;
2483        int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
2484        int ret;
2485
2486        /* If we have version number support, then check to see if the adapter
2487         * already has up-to-date PHY firmware loaded.
2488         */
2489        if (phy_fw_version) {
2490                new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
2491                ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
2492                if (ret < 0)
2493                        return ret;
2494
2495                if (cur_phy_fw_ver >= new_phy_fw_vers) {
2496                        CH_WARN(adap, "PHY Firmware already up-to-date, "
2497                                "version %#x\n", cur_phy_fw_ver);
2498                        return 0;
2499                }
2500        }
2501
2502        /* Ask the firmware where it wants us to copy the PHY firmware image.
2503         * The size of the file requires a special version of the READ
2504         * command which will pass the file size via the values field of
2505         * the PARAMS_CMD and retrieve the firmware's return value in the
2506         * same buffer.
2507         */
2508        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2509                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2510                 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2511                 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
2512        val = phy_fw_size;
2513        ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
2514                                 &param, &val, 1);
2515        if (ret < 0)
2516                return ret;
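            /* The firmware returns the memory type in the upper bits of the
             * value and a 64KB-aligned offset within that memory in the low
             * byte.
             */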
2517        mtype = val >> 8;
2518        maddr = (val & 0xff) << 16;
2519
2520        /* Copy the supplied PHY Firmware image to the adapter memory location
2521         * allocated by the adapter firmware.
2522         */
2523        if (win_lock)
2524                spin_lock_bh(win_lock);
2525        ret = t4_memory_rw(adap, win, mtype, maddr,
2526                           phy_fw_size, (__be32 *)phy_fw_data,
2527                           T4_MEMORY_WRITE);
2528        if (win_lock)
2529                spin_unlock_bh(win_lock);
2530        if (ret)
2531                return ret;
2532
2533        /* Tell the firmware that the PHY firmware image has been written to
2534         * RAM and it can now start copying it over to the PHYs.  The chip
2535         * firmware will RESET the affected PHYs as part of this operation
2536         * leaving them running the new PHY firmware image.
2537         */
2538        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2539                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2540                 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2541                 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
2542        ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
2543                                    &param, &val, 30000);
2544
2545        /* If we have version number support, then check to see that the new
2546         * firmware got loaded properly.
2547         */
2548        if (phy_fw_version) {
2549                ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
2550                if (ret < 0)
2551                        return ret;
2552
2553                if (cur_phy_fw_ver != new_phy_fw_vers) {
2554                        CH_WARN(adap, "PHY Firmware did not update: "
2555                                "version on adapter %#x, "
2556                                "version flashed %#x\n",
2557                                cur_phy_fw_ver, new_phy_fw_vers);
2558                        return -ENXIO;
2559                }
2560        }
2561
2562        return 1;
2563}
2564
2565/**
2566 *      t4_fwcache - firmware cache operation
2567 *      @adap: the adapter
2568 *      @op  : the operation (flush or flush and invalidate)
2569 */
2570int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
2571{
2572        struct fw_params_cmd c;
2573
2574        memset(&c, 0, sizeof(c));
2575        c.op_to_vfn =
2576                cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
2577                            FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
2578                            FW_PARAMS_CMD_PFN_V(adap->pf) |
2579                            FW_PARAMS_CMD_VFN_V(0));
2580        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2581        c.param[0].mnem =
2582                cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2583                            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
2584        c.param[0].val = (__force __be32)op;
2585
2586        return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
2587}
2588
2589void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
2590                        unsigned int *pif_req_wrptr,
2591                        unsigned int *pif_rsp_wrptr)
2592{
2593        int i, j;
2594        u32 cfg, val, req, rsp;
2595
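            /* Temporarily disable logic-analyzer capture (if it is enabled)
             * while the trace is read out; the original configuration is
             * restored at the end of the function.
             */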
2596        cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
2597        if (cfg & LADBGEN_F)
2598                t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
2599
2600        val = t4_read_reg(adap, CIM_DEBUGSTS_A);
2601        req = POLADBGWRPTR_G(val);
2602        rsp = PILADBGWRPTR_G(val);
2603        if (pif_req_wrptr)
2604                *pif_req_wrptr = req;
2605        if (pif_rsp_wrptr)
2606                *pif_rsp_wrptr = rsp;
2607
2608        for (i = 0; i < CIM_PIFLA_SIZE; i++) {
2609                for (j = 0; j < 6; j++) {
2610                        t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
2611                                     PILADBGRDPTR_V(rsp));
2612                        *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
2613                        *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
2614                        req++;
2615                        rsp++;
2616                }
2617                req = (req + 2) & POLADBGRDPTR_M;
2618                rsp = (rsp + 2) & PILADBGRDPTR_M;
2619        }
2620        t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
2621}
2622
2623void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
2624{
2625        u32 cfg;
2626        int i, j, idx;
2627
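            /* As above, pause logic-analyzer capture (if enabled) while the
             * MA trace is read out and restore the configuration afterwards.
             */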
2628        cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
2629        if (cfg & LADBGEN_F)
2630                t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
2631
2632        for (i = 0; i < CIM_MALA_SIZE; i++) {
2633                for (j = 0; j < 5; j++) {
2634                        idx = 8 * i + j;
2635                        t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
2636                                     PILADBGRDPTR_V(idx));
2637                        *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
2638                        *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
2639                }
2640        }
2641        t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
2642}
2643
2644void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
2645{
2646        unsigned int i, j;
2647
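            /* Read all eight ULP_RX logic-analyzer selections; the captured
             * entries are interleaved in @la_buf so that entry j of selection
             * i ends up at la_buf[i + 8 * j].
             */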
2648        for (i = 0; i < 8; i++) {
2649                u32 *p = la_buf + i;
2650
2651                t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
2652                j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
2653                t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
2654                for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
2655                        *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
2656        }
2657}
2658
2659#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
2660                     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
2661                     FW_PORT_CAP_ANEG)
2662
2663/**
2664 *      t4_link_l1cfg - apply link configuration to MAC/PHY
2665 *      @adap: the adapter
2666 *      @mbox: mbox to use for the FW command
2667 *      @port: the port id
     *      @lc: the requested link configuration
2668 *
2669 *      Set up a port's MAC and PHY according to a desired link configuration.
2670 *      - If the PHY can auto-negotiate first decide what to advertise, then
2671 *        enable/disable auto-negotiation as desired, and reset.
2672 *      - If the PHY does not auto-negotiate just reset it.
2673 *      - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2674 *        otherwise do it later based on the outcome of auto-negotiation.
2675 */
2676int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
2677                  struct link_config *lc)
2678{
2679        struct fw_port_cmd c;
2680        unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
2681
2682        lc->link_ok = 0;
2683        if (lc->requested_fc & PAUSE_RX)
2684                fc |= FW_PORT_CAP_FC_RX;
2685        if (lc->requested_fc & PAUSE_TX)
2686                fc |= FW_PORT_CAP_FC_TX;
2687
2688        memset(&c, 0, sizeof(c));
2689        c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2690                                     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2691                                     FW_PORT_CMD_PORTID_V(port));
2692        c.action_to_len16 =
2693                cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2694                            FW_LEN16(c));
2695
2696        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2697                c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
2698                                             fc);
2699                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2700        } else if (lc->autoneg == AUTONEG_DISABLE) {
2701                c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
2702                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2703        } else
2704                c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
2705
2706        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2707}
2708
2709/**
2710 *      t4_restart_aneg - restart autonegotiation
2711 *      @adap: the adapter
2712 *      @mbox: mbox to use for the FW command
2713 *      @port: the port id
2714 *
2715 *      Restarts autonegotiation for the selected port.
2716 */
2717int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
2718{
2719        struct fw_port_cmd c;
2720
2721        memset(&c, 0, sizeof(c));
2722        c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2723                                     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2724                                     FW_PORT_CMD_PORTID_V(port));
2725        c.action_to_len16 =
2726                cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2727                            FW_LEN16(c));
2728        c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
2729        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2730}
2731
2732typedef void (*int_handler_t)(struct adapter *adap);
2733
2734struct intr_info {
2735        unsigned int mask;       /* bits to check in interrupt status */
2736        const char *msg;         /* message to print or NULL */
2737        short stat_idx;          /* stat counter to increment or -1 */
2738        unsigned short fatal;    /* whether the condition reported is fatal */
2739        int_handler_t int_handler; /* platform-specific int handler */
2740};
2741
2742/**
2743 *      t4_handle_intr_status - table driven interrupt handler
2744 *      @adapter: the adapter that generated the interrupt
2745 *      @reg: the interrupt status register to process
2746 *      @acts: table of interrupt actions
2747 *
2748 *      A table driven interrupt handler that applies a set of masks to an
2749 *      interrupt status word and performs the corresponding actions if the
2750 *      interrupts described by the mask have occurred.  The actions include
2751 *      optionally emitting a warning or alert message.  The table is terminated
2752 *      by an entry specifying mask 0.  Returns the number of fatal interrupt
2753 *      conditions.
2754 */
2755static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
2756                                 const struct intr_info *acts)
2757{
2758        int fatal = 0;
2759        unsigned int mask = 0;
2760        unsigned int status = t4_read_reg(adapter, reg);
2761
2762        for ( ; acts->mask; ++acts) {
2763                if (!(status & acts->mask))
2764                        continue;
2765                if (acts->fatal) {
2766                        fatal++;
2767                        dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2768                                  status & acts->mask);
2769                } else if (acts->msg && printk_ratelimit())
2770                        dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2771                                 status & acts->mask);
2772                if (acts->int_handler)
2773                        acts->int_handler(adapter);
2774                mask |= acts->mask;
2775        }
2776        status &= mask;
2777        if (status)                           /* clear processed interrupts */
2778                t4_write_reg(adapter, reg, status);
2779        return fatal;
2780}
2781
2782/*
2783 * Interrupt handler for the PCIE module.
2784 */
2785static void pcie_intr_handler(struct adapter *adapter)
2786{
2787        static const struct intr_info sysbus_intr_info[] = {
2788                { RNPP_F, "RXNP array parity error", -1, 1 },
2789                { RPCP_F, "RXPC array parity error", -1, 1 },
2790                { RCIP_F, "RXCIF array parity error", -1, 1 },
2791                { RCCP_F, "Rx completions control array parity error", -1, 1 },
2792                { RFTP_F, "RXFT array parity error", -1, 1 },
2793                { 0 }
2794        };
2795        static const struct intr_info pcie_port_intr_info[] = {
2796                { TPCP_F, "TXPC array parity error", -1, 1 },
2797                { TNPP_F, "TXNP array parity error", -1, 1 },
2798                { TFTP_F, "TXFT array parity error", -1, 1 },
2799                { TCAP_F, "TXCA array parity error", -1, 1 },
2800                { TCIP_F, "TXCIF array parity error", -1, 1 },
2801                { RCAP_F, "RXCA array parity error", -1, 1 },
2802                { OTDD_F, "outbound request TLP discarded", -1, 1 },
2803                { RDPE_F, "Rx data parity error", -1, 1 },
2804                { TDUE_F, "Tx uncorrectable data error", -1, 1 },
2805                { 0 }
2806        };
2807        static const struct intr_info pcie_intr_info[] = {
2808                { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
2809                { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
2810                { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
2811                { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2812                { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2813                { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2814                { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2815                { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
2816                { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
2817                { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2818                { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
2819                { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2820                { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2821                { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
2822                { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2823                { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2824                { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
2825                { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2826                { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2827                { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2828                { FIDPERR_F, "PCI FID parity error", -1, 1 },
2829                { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
2830                { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
2831                { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2832                { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
2833                { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
2834                { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
2835                { PCIESINT_F, "PCI core secondary fault", -1, 1 },
2836                { PCIEPINT_F, "PCI core primary fault", -1, 1 },
2837                { UNXSPLCPLERR_F, "PCI unexpected split completion error",
2838                  -1, 0 },
2839                { 0 }
2840        };
2841
2842        static struct intr_info t5_pcie_intr_info[] = {
2843                { MSTGRPPERR_F, "Master Response Read Queue parity error",
2844                  -1, 1 },
2845                { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
2846                { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
2847                { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2848                { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2849                { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2850                { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2851                { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
2852                  -1, 1 },
2853                { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
2854                  -1, 1 },
2855                { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2856                { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
2857                { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2858                { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2859                { DREQWRPERR_F, "PCI DMA channel write request parity error",
2860                  -1, 1 },
2861                { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2862                { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2863                { HREQWRPERR_F, "PCI HMA channel write request parity error", -1, 1 },
2864                { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2865                { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2866                { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2867                { FIDPERR_F, "PCI FID parity error", -1, 1 },
2868                { VFIDPERR_F, "PCI VFID parity error", -1, 1 },
2869                { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
2870                { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2871                { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
2872                  -1, 1 },
2873                { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
2874                  -1, 1 },
2875                { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
2876                { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
2877                { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2878                { READRSPERR_F, "Outbound read error", -1, 0 },
2879                { 0 }
2880        };
2881
2882        int fat;
2883
2884        if (is_t4(adapter->params.chip))
2885                fat = t4_handle_intr_status(adapter,
2886                                PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
2887                                sysbus_intr_info) +
2888                        t4_handle_intr_status(adapter,
2889                                        PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
2890                                        pcie_port_intr_info) +
2891                        t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2892                                              pcie_intr_info);
2893        else
2894                fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2895                                            t5_pcie_intr_info);
2896
2897        if (fat)
2898                t4_fatal_err(adapter);
2899}
2900
2901/*
2902 * TP interrupt handler.
2903 */
2904static void tp_intr_handler(struct adapter *adapter)
2905{
2906        static const struct intr_info tp_intr_info[] = {
2907                { 0x3fffffff, "TP parity error", -1, 1 },
2908                { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
2909                { 0 }
2910        };
2911
2912        if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
2913                t4_fatal_err(adapter);
2914}
2915
2916/*
2917 * SGE interrupt handler.
2918 */
2919static void sge_intr_handler(struct adapter *adapter)
2920{
2921        u64 v;
2922        u32 err;
2923
2924        static const struct intr_info sge_intr_info[] = {
2925                { ERR_CPL_EXCEED_IQE_SIZE_F,
2926                  "SGE received CPL exceeding IQE size", -1, 1 },
2927                { ERR_INVALID_CIDX_INC_F,
2928                  "SGE GTS CIDX increment too large", -1, 0 },
2929                { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
2930                { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
2931                { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
2932                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2933                { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
2934                  0 },
2935                { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
2936                  0 },
2937                { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
2938                  0 },
2939                { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
2940                  0 },
2941                { ERR_ING_CTXT_PRIO_F,
2942                  "SGE too many priority ingress contexts", -1, 0 },
2943                { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
2944                { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
2945                { 0 }
2946        };
2947
2948        static struct intr_info t4t5_sge_intr_info[] = {
2949                { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
2950                { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
2951                { ERR_EGR_CTXT_PRIO_F,
2952                  "SGE too many priority egress contexts", -1, 0 },
2953                { 0 }
2954        };
2955
2956        v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
2957                ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
2958        if (v) {
2959                dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
2960                                (unsigned long long)v);
2961                t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
2962                t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
2963        }
2964
2965        v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
2966        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
2967                v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
2968                                           t4t5_sge_intr_info);
2969
2970        err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
2971        if (err & ERROR_QID_VALID_F) {
2972                dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
2973                        ERROR_QID_G(err));
2974                if (err & UNCAPTURED_ERROR_F)
2975                        dev_err(adapter->pdev_dev,
2976                                "SGE UNCAPTURED_ERROR set (clearing)\n");
2977                t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
2978                             UNCAPTURED_ERROR_F);
2979        }
2980
2981        if (v != 0)
2982                t4_fatal_err(adapter);
2983}
2984
2985#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
2986                      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
2987#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
2988                      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
2989
2990/*
2991 * CIM interrupt handler.
2992 */
2993static void cim_intr_handler(struct adapter *adapter)
2994{
2995        static const struct intr_info cim_intr_info[] = {
2996                { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
2997                { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2998                { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2999                { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
3000                { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
3001                { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
3002                { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
3003                { 0 }
3004        };
3005        static const struct intr_info cim_upintr_info[] = {
3006                { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
3007                { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
3008                { ILLWRINT_F, "CIM illegal write", -1, 1 },
3009                { ILLRDINT_F, "CIM illegal read", -1, 1 },
3010                { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
3011                { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
3012                { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
3013                { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
3014                { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
3015                { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
3016                { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
3017                { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
3018                { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
3019                { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
3020                { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
3021                { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
3022                { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
3023                { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
3024                { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
3025                { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
3026                { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
3027                { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
3028                { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
3029                { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
3030                { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
3031                { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
3032                { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
3033                { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
3034                { 0 }
3035        };
3036
3037        int fat;
3038
3039        if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
3040                t4_report_fw_error(adapter);
3041
3042        fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
3043                                    cim_intr_info) +
3044              t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
3045                                    cim_upintr_info);
3046        if (fat)
3047                t4_fatal_err(adapter);
3048}
3049
3050/*
3051 * ULP RX interrupt handler.
3052 */
3053static void ulprx_intr_handler(struct adapter *adapter)
3054{
3055        static const struct intr_info ulprx_intr_info[] = {
3056                { 0x1800000, "ULPRX context error", -1, 1 },
3057                { 0x7fffff, "ULPRX parity error", -1, 1 },
3058                { 0 }
3059        };
3060
3061        if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
3062                t4_fatal_err(adapter);
3063}
3064
3065/*
3066 * ULP TX interrupt handler.
3067 */
3068static void ulptx_intr_handler(struct adapter *adapter)
3069{
3070        static const struct intr_info ulptx_intr_info[] = {
3071                { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
3072                  0 },
3073                { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
3074                  0 },
3075                { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
3076                  0 },
3077                { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
3078                  0 },
3079                { 0xfffffff, "ULPTX parity error", -1, 1 },
3080                { 0 }
3081        };
3082
3083        if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
3084                t4_fatal_err(adapter);
3085}
3086
3087/*
3088 * PM TX interrupt handler.
3089 */
3090static void pmtx_intr_handler(struct adapter *adapter)
3091{
3092        static const struct intr_info pmtx_intr_info[] = {
3093                { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
3094                { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
3095                { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
3096                { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
3097                { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
3098                { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
3099                { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
3100                  -1, 1 },
3101                { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
3102                { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
3103                { 0 }
3104        };
3105
3106        if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
3107                t4_fatal_err(adapter);
3108}
3109
3110/*
3111 * PM RX interrupt handler.
3112 */
3113static void pmrx_intr_handler(struct adapter *adapter)
3114{
3115        static const struct intr_info pmrx_intr_info[] = {
3116                { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
3117                { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
3118                { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
3119                { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
3120                  -1, 1 },
3121                { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
3122                { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
3123                { 0 }
3124        };
3125
3126        if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
3127                t4_fatal_err(adapter);
3128}
3129
3130/*
3131 * CPL switch interrupt handler.
3132 */
3133static void cplsw_intr_handler(struct adapter *adapter)
3134{
3135        static const struct intr_info cplsw_intr_info[] = {
3136                { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
3137                { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
3138                { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
3139                { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
3140                { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
3141                { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
3142                { 0 }
3143        };
3144
3145        if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
3146                t4_fatal_err(adapter);
3147}
3148
3149/*
3150 * LE interrupt handler.
3151 */
3152static void le_intr_handler(struct adapter *adap)
3153{
3154        enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
3155        static const struct intr_info le_intr_info[] = {
3156                { LIPMISS_F, "LE LIP miss", -1, 0 },
3157                { LIP0_F, "LE 0 LIP error", -1, 0 },
3158                { PARITYERR_F, "LE parity error", -1, 1 },
3159                { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3160                { REQQPARERR_F, "LE request queue parity error", -1, 1 },
3161                { 0 }
3162        };
3163
3164        static struct intr_info t6_le_intr_info[] = {
3165                { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
3166                { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
3167                { TCAMINTPERR_F, "LE parity error", -1, 1 },
3168                { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3169                { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
3170                { 0 }
3171        };
3172
3173        if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
3174                                  (chip <= CHELSIO_T5) ?
3175                                  le_intr_info : t6_le_intr_info))
3176                t4_fatal_err(adap);
3177}
3178
3179/*
3180 * MPS interrupt handler.
3181 */
3182static void mps_intr_handler(struct adapter *adapter)
3183{
3184        static const struct intr_info mps_rx_intr_info[] = {
3185                { 0xffffff, "MPS Rx parity error", -1, 1 },
3186                { 0 }
3187        };
3188        static const struct intr_info mps_tx_intr_info[] = {
3189                { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
3190                { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
3191                { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
3192                  -1, 1 },
3193                { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
3194                  -1, 1 },
3195                { BUBBLE_F, "MPS Tx underflow", -1, 1 },
3196                { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
3197                { FRMERR_F, "MPS Tx framing error", -1, 1 },
3198                { 0 }
3199        };
3200        static const struct intr_info mps_trc_intr_info[] = {
3201                { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
3202                { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
3203                  -1, 1 },
3204                { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
3205                { 0 }
3206        };
3207        static const struct intr_info mps_stat_sram_intr_info[] = {
3208                { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
3209                { 0 }
3210        };
3211        static const struct intr_info mps_stat_tx_intr_info[] = {
3212                { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
3213                { 0 }
3214        };
3215        static const struct intr_info mps_stat_rx_intr_info[] = {
3216                { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
3217                { 0 }
3218        };
3219        static const struct intr_info mps_cls_intr_info[] = {
3220                { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
3221                { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
3222                { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
3223                { 0 }
3224        };
3225
3226        int fat;
3227
3228        fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
3229                                    mps_rx_intr_info) +
3230              t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
3231                                    mps_tx_intr_info) +
3232              t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
3233                                    mps_trc_intr_info) +
3234              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
3235                                    mps_stat_sram_intr_info) +
3236              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
3237                                    mps_stat_tx_intr_info) +
3238              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
3239                                    mps_stat_rx_intr_info) +
3240              t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
3241                                    mps_cls_intr_info);
3242
3243        t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
3244        t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
3245        if (fat)
3246                t4_fatal_err(adapter);
3247}
3248
3249#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
3250                      ECC_UE_INT_CAUSE_F)
3251
3252/*
3253 * EDC/MC interrupt handler.
3254 */
3255static void mem_intr_handler(struct adapter *adapter, int idx)
3256{
3257        static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
3258
3259        unsigned int addr, cnt_addr, v;
3260
3261        if (idx <= MEM_EDC1) {
3262                addr = EDC_REG(EDC_INT_CAUSE_A, idx);
3263                cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
3264        } else if (idx == MEM_MC) {
3265                if (is_t4(adapter->params.chip)) {
3266                        addr = MC_INT_CAUSE_A;
3267                        cnt_addr = MC_ECC_STATUS_A;
3268                } else {
3269                        addr = MC_P_INT_CAUSE_A;
3270                        cnt_addr = MC_P_ECC_STATUS_A;
3271                }
3272        } else {
3273                addr = MC_REG(MC_P_INT_CAUSE_A, 1);
3274                cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
3275        }
3276
3277        v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
3278        if (v & PERR_INT_CAUSE_F)
3279                dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
3280                          name[idx]);
3281        if (v & ECC_CE_INT_CAUSE_F) {
3282                u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
3283
3284                t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
3285                if (printk_ratelimit())
3286                        dev_warn(adapter->pdev_dev,
3287                                 "%u %s correctable ECC data error%s\n",
3288                                 cnt, name[idx], cnt > 1 ? "s" : "");
3289        }
3290        if (v & ECC_UE_INT_CAUSE_F)
3291                dev_alert(adapter->pdev_dev,
3292                          "%s uncorrectable ECC data error\n", name[idx]);
3293
3294        t4_write_reg(adapter, addr, v);
3295        if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
3296                t4_fatal_err(adapter);
3297}
3298
3299/*
3300 * MA interrupt handler.
3301 */
3302static void ma_intr_handler(struct adapter *adap)
3303{
3304        u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
3305
3306        if (status & MEM_PERR_INT_CAUSE_F) {
3307                dev_alert(adap->pdev_dev,
3308                          "MA parity error, parity status %#x\n",
3309                          t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
3310                if (is_t5(adap->params.chip))
3311                        dev_alert(adap->pdev_dev,
3312                                  "MA parity error, parity status %#x\n",
3313                                  t4_read_reg(adap,
3314                                              MA_PARITY_ERROR_STATUS2_A));
3315        }
3316        if (status & MEM_WRAP_INT_CAUSE_F) {
3317                v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
3318                dev_alert(adap->pdev_dev, "MA address wrap-around error by "
3319                          "client %u to address %#x\n",
3320                          MEM_WRAP_CLIENT_NUM_G(v),
3321                          MEM_WRAP_ADDRESS_G(v) << 4);
3322        }
3323        t4_write_reg(adap, MA_INT_CAUSE_A, status);
3324        t4_fatal_err(adap);
3325}
3326
3327/*
3328 * SMB interrupt handler.
3329 */
3330static void smb_intr_handler(struct adapter *adap)
3331{
3332        static const struct intr_info smb_intr_info[] = {
3333                { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
3334                { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
3335                { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
3336                { 0 }
3337        };
3338
3339        if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
3340                t4_fatal_err(adap);
3341}
3342
3343/*
3344 * NC-SI interrupt handler.
3345 */
3346static void ncsi_intr_handler(struct adapter *adap)
3347{
3348        static const struct intr_info ncsi_intr_info[] = {
3349                { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
3350                { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
3351                { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
3352                { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
3353                { 0 }
3354        };
3355
3356        if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
3357                t4_fatal_err(adap);
3358}
3359
3360/*
3361 * XGMAC interrupt handler.
3362 */
3363static void xgmac_intr_handler(struct adapter *adap, int port)
3364{
3365        u32 v, int_cause_reg;
3366
3367        if (is_t4(adap->params.chip))
3368                int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
3369        else
3370                int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
3371
3372        v = t4_read_reg(adap, int_cause_reg);
3373
3374        v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
3375        if (!v)
3376                return;
3377
3378        if (v & TXFIFO_PRTY_ERR_F)
3379                dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
3380                          port);
3381        if (v & RXFIFO_PRTY_ERR_F)
3382                dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
3383                          port);
3384        t4_write_reg(adap, int_cause_reg, v);
3385        t4_fatal_err(adap);
3386}
3387
3388/*
3389 * PL interrupt handler.
3390 */
3391static void pl_intr_handler(struct adapter *adap)
3392{
3393        static const struct intr_info pl_intr_info[] = {
3394                { FATALPERR_F, "T4 fatal parity error", -1, 1 },
3395                { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
3396                { 0 }
3397        };
3398
3399        if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
3400                t4_fatal_err(adap);
3401}
3402
3403#define PF_INTR_MASK (PFSW_F)
3404#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
3405                EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
3406                CPL_SWITCH_F | SGE_F | ULP_TX_F)
3407
3408/**
3409 *      t4_slow_intr_handler - control path interrupt handler
3410 *      @adapter: the adapter
3411 *
3412 *      T4 interrupt handler for non-data global interrupt events, e.g., errors.
3413 *      It is designated 'slow' because it involves register reads, while
3414 *      data interrupts typically don't involve any MMIOs.
3415 */
3416int t4_slow_intr_handler(struct adapter *adapter)
3417{
3418        u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
3419
3420        if (!(cause & GLBL_INTR_MASK))
3421                return 0;
3422        if (cause & CIM_F)
3423                cim_intr_handler(adapter);
3424        if (cause & MPS_F)
3425                mps_intr_handler(adapter);
3426        if (cause & NCSI_F)
3427                ncsi_intr_handler(adapter);
3428        if (cause & PL_F)
3429                pl_intr_handler(adapter);
3430        if (cause & SMB_F)
3431                smb_intr_handler(adapter);
3432        if (cause & XGMAC0_F)
3433                xgmac_intr_handler(adapter, 0);
3434        if (cause & XGMAC1_F)
3435                xgmac_intr_handler(adapter, 1);
3436        if (cause & XGMAC_KR0_F)
3437                xgmac_intr_handler(adapter, 2);
3438        if (cause & XGMAC_KR1_F)
3439                xgmac_intr_handler(adapter, 3);
3440        if (cause & PCIE_F)
3441                pcie_intr_handler(adapter);
3442        if (cause & MC_F)
3443                mem_intr_handler(adapter, MEM_MC);
3444        if (is_t5(adapter->params.chip) && (cause & MC1_F))
3445                mem_intr_handler(adapter, MEM_MC1);
3446        if (cause & EDC0_F)
3447                mem_intr_handler(adapter, MEM_EDC0);
3448        if (cause & EDC1_F)
3449                mem_intr_handler(adapter, MEM_EDC1);
3450        if (cause & LE_F)
3451                le_intr_handler(adapter);
3452        if (cause & TP_F)
3453                tp_intr_handler(adapter);
3454        if (cause & MA_F)
3455                ma_intr_handler(adapter);
3456        if (cause & PM_TX_F)
3457                pmtx_intr_handler(adapter);
3458        if (cause & PM_RX_F)
3459                pmrx_intr_handler(adapter);
3460        if (cause & ULP_RX_F)
3461                ulprx_intr_handler(adapter);
3462        if (cause & CPL_SWITCH_F)
3463                cplsw_intr_handler(adapter);
3464        if (cause & SGE_F)
3465                sge_intr_handler(adapter);
3466        if (cause & ULP_TX_F)
3467                ulptx_intr_handler(adapter);
3468
3469        /* Clear the interrupts just processed for which we are the master. */
3470        t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
3471        (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
3472        return 1;
3473}
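
/* Editor's illustrative sketch (not part of the driver): a minimal ISR built
 * on t4_slow_intr_handler().  The real cxgb4 interrupt handlers live in
 * cxgb4_main.c; this only shows how the return value (non-zero if any global
 * cause bits were handled, 0 otherwise) maps onto IRQ_HANDLED/IRQ_NONE.
 *
 *      static irqreturn_t example_slow_intr(int irq, void *cookie)
 *      {
 *              struct adapter *adap = cookie;
 *
 *              return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
 *      }
 */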
3474
3475/**
3476 *      t4_intr_enable - enable interrupts
3477 *      @adapter: the adapter whose interrupts should be enabled
3478 *
3479 *      Enable PF-specific interrupts for the calling function and the top-level
3480 *      interrupt concentrator for global interrupts.  Interrupts are already
3481 *      enabled at each module, here we just enable the roots of the interrupt
3482 *      hierarchies.
3483 *
3484 *      Note: this function should be called only when the driver manages
3485 *      non PF-specific interrupts from the various HW modules.  Only one PCI
3486 *      function at a time should be doing this.
3487 */
3488void t4_intr_enable(struct adapter *adapter)
3489{
3490        u32 val = 0;
3491        u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
3492
3493        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
3494                val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
3495        t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
3496                     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
3497                     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
3498                     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
3499                     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
3500                     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
3501                     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
3502        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
3503        t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
3504}
3505
3506/**
3507 *      t4_intr_disable - disable interrupts
3508 *      @adapter: the adapter whose interrupts should be disabled
3509 *
3510 *      Disable interrupts.  We only disable the top-level interrupt
3511 *      concentrators.  The caller must be a PCI function managing global
3512 *      interrupts.
3513 */
3514void t4_intr_disable(struct adapter *adapter)
3515{
3516        u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
3517
3518        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
3519        t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
3520}
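
/* Editor's usage sketch (assumed, not taken verbatim from cxgb4_main.c):
 * t4_intr_enable()/t4_intr_disable() only gate the roots of the interrupt
 * hierarchy, so the PF that manages global interrupts pairs them around the
 * lifetime of its IRQ handler:
 *
 *      err = request_irq(irq, example_slow_intr, 0, "cxgb4-example", adap);
 *      if (!err)
 *              t4_intr_enable(adap);
 *      ...
 *      t4_intr_disable(adap);
 *      free_irq(irq, adap);
 */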
3521
3522/**
3523 *      hash_mac_addr - return the hash value of a MAC address
3524 *      @addr: the 48-bit Ethernet MAC address
3525 *
3526 *      Hashes a MAC address according to the hash function used by HW inexact
3527 *      (hash) address matching.
3528 */
3529static int hash_mac_addr(const u8 *addr)
3530{
3531        u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
3532        u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
3533        a ^= b;
3534        a ^= (a >> 12);
3535        a ^= (a >> 6);
3536        return a & 0x3f;
3537}
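
/* Editor's illustrative sketch (not part of the driver): the 6-bit hash above
 * is typically used to pick one bucket in a 64-bit inexact-match (hash-filter)
 * vector, one bit per bucket.  Worked by hand for 00:11:22:33:44:55:
 * a = 0x001122, b = 0x334455, a ^= b -> 0x335577, a ^= a >> 12 -> 0x335642,
 * a ^= a >> 6 -> 0x339b1b, and 0x339b1b & 0x3f = 0x1b, i.e. bucket 27, so the
 * line below sets bit 27 of hash_vec.
 *
 *      static const u8 mc_addr[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *      u64 hash_vec = 0;
 *
 *      hash_vec |= 1ULL << hash_mac_addr(mc_addr);
 */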
3538
3539/**
3540 *      t4_config_rss_range - configure a portion of the RSS mapping table
3541 *      @adapter: the adapter
3542 *      @mbox: mbox to use for the FW command
3543 *      @viid: virtual interface whose RSS subtable is to be written
3544 *      @start: start entry in the table to write
3545 *      @n: how many table entries to write
3546 *      @rspq: values for the response queue lookup table
3547 *      @nrspq: number of values in @rspq
3548 *
3549 *      Programs the selected part of the VI's RSS mapping table with the
3550 *      provided values.  If @nrspq < @n the supplied values are used repeatedly
3551 *      until the full table range is populated.
3552 *
3553 *      The caller must ensure the values in @rspq are in the range allowed for
3554 *      @viid.
3555 */
3556int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
3557                        int start, int n, const u16 *rspq, unsigned int nrspq)
3558{
3559        int ret;
3560        const u16 *rsp = rspq;
3561        const u16 *rsp_end = rspq + nrspq;
3562        struct fw_rss_ind_tbl_cmd cmd;
3563
3564        memset(&cmd, 0, sizeof(cmd));
3565        cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
3566                               FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3567                               FW_RSS_IND_TBL_CMD_VIID_V(viid));
3568        cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
3569
3570        /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
3571        while (n > 0) {
3572                int nq = min(n, 32);
3573                __be32 *qp = &cmd.iq0_to_iq2;
3574
3575                cmd.niqid = cpu_to_be16(nq);
3576                cmd.startidx = cpu_to_be16(start);
3577
3578                start += nq;
3579                n -= nq;
3580
3581                while (nq > 0) {
3582                        unsigned int v;
3583
3584                        v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
3585                        if (++rsp >= rsp_end)
3586                                rsp = rspq;
3587                        v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
3588                        if (++rsp >= rsp_end)
3589                                rsp = rspq;
3590                        v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
3591                        if (++rsp >= rsp_end)
3592                                rsp = rspq;
3593
3594                        *qp++ = cpu_to_be32(v);
3595                        nq -= 3;
3596                }
3597
3598                ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
3599                if (ret)
3600                        return ret;
3601        }
3602        return 0;
3603}
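
/* Editor's illustrative sketch (not part of the driver): spreading a VI's
 * RSS indirection table (128 entries assumed here) over four ingress queues.
 * Because nrspq < n, the four queue IDs are repeated until the whole range is
 * written, exactly as described above.  pi->viid and the queue IDs are assumed
 * to come from the caller.
 *
 *      u16 rspq[4] = { iq0_id, iq1_id, iq2_id, iq3_id };
 *      int err;
 *
 *      err = t4_config_rss_range(adap, adap->mbox, pi->viid, 0, 128,
 *                                rspq, ARRAY_SIZE(rspq));
 */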
3604
3605/**
3606 *      t4_config_glbl_rss - configure the global RSS mode
3607 *      @adapter: the adapter
3608 *      @mbox: mbox to use for the FW command
3609 *      @mode: global RSS mode
3610 *      @flags: mode-specific flags
3611 *
3612 *      Sets the global RSS mode.
3613 */
3614int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
3615                       unsigned int flags)
3616{
3617        struct fw_rss_glb_config_cmd c;
3618
3619        memset(&c, 0, sizeof(c));
3620        c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
3621                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3622        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3623        if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
3624                c.u.manual.mode_pkd =
3625                        cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
3626        } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
3627                c.u.basicvirtual.mode_pkd =
3628                        cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
3629                c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
3630        } else
3631                return -EINVAL;
3632        return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3633}
3634
3635/**
3636 *      t4_config_vi_rss - configure per VI RSS settings
3637 *      @adapter: the adapter
3638 *      @mbox: mbox to use for the FW command
3639 *      @viid: the VI id
3640 *      @flags: RSS flags
3641 *      @defq: id of the default RSS queue for the VI.
3642 *
3643 *      Configures VI-specific RSS properties.
3644 */
3645int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
3646                     unsigned int flags, unsigned int defq)
3647{
3648        struct fw_rss_vi_config_cmd c;
3649
3650        memset(&c, 0, sizeof(c));
3651        c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
3652                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3653                                   FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
3654        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3655        c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
3656                                        FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
3657        return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3658}
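
/* Editor's illustrative sketch (not part of the driver): enabling 4-tuple
 * hashing for TCP over IPv4 and IPv6 on a VI and making queue rss[0] its
 * default.  The FW_RSS_VI_CONFIG_CMD_*_F flag names are assumed from the
 * firmware API header; rss[] is a hypothetical per-VI queue array.
 *
 *      err = t4_config_vi_rss(adap, adap->mbox, pi->viid,
 *                             FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
 *                             FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F,
 *                             rss[0]);
 */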
3659
3660/* Read an RSS table row */
3661static int rd_rss_row(struct adapter *adap, int row, u32 *val)
3662{
3663        t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
3664        return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
3665                                   5, 0, val);
3666}
3667
3668/**
3669 *      t4_read_rss - read the contents of the RSS mapping table
3670 *      @adapter: the adapter
3671 *      @map: holds the contents of the RSS mapping table
3672 *
3673 *      Reads the contents of the RSS hash->queue mapping table.
3674 */
3675int t4_read_rss(struct adapter *adapter, u16 *map)
3676{
3677        u32 val;
3678        int i, ret;
3679
3680        for (i = 0; i < RSS_NENTRIES / 2; ++i) {
3681                ret = rd_rss_row(adapter, i, &val);
3682                if (ret)
3683                        return ret;
3684                *map++ = LKPTBLQUEUE0_G(val);
3685                *map++ = LKPTBLQUEUE1_G(val);
3686        }
3687        return 0;
3688}
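
/* Editor's illustrative sketch (not part of the driver): dumping the whole
 * hash->queue mapping table, e.g. for a debugfs view.  RSS_NENTRIES is the
 * table size used by the loop above.
 *
 *      u16 map[RSS_NENTRIES];
 *      int err;
 *
 *      err = t4_read_rss(adap, map);
 *      if (!err)
 *              pr_info("RSS entry 0 -> queue %u\n", map[0]);
 */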
3689
3690/**
3691 *      t4_fw_tp_pio_rw - Access TP PIO through LDST
3692 *      @adap: the adapter
3693 *      @vals: where the indirect register values are stored/written
3694 *      @nregs: how many indirect registers to read/write
3695 *      @start_index: index of first indirect register to read/write
3696 *      @rw: Read (1) or Write (0)
3697 *
3698 *      Access TP PIO registers through LDST
3699 */
3700static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
3701                            unsigned int start_index, unsigned int rw)
3702{
3703        int ret, i;
3704        int cmd = FW_LDST_ADDRSPC_TP_PIO;
3705        struct fw_ldst_cmd c;
3706
3707        for (i = 0 ; i < nregs; i++) {
3708                memset(&c, 0, sizeof(c));
3709                c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
3710                                                FW_CMD_REQUEST_F |
3711                                                (rw ? FW_CMD_READ_F :
3712                                                      FW_CMD_WRITE_F) |
3713                                                FW_LDST_CMD_ADDRSPACE_V(cmd));
3714                c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
3715
3716                c.u.addrval.addr = cpu_to_be32(start_index + i);
3717                c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
3718                ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3719                if (!ret && rw)
3720                        vals[i] = be32_to_cpu(c.u.addrval.val);
3721        }
3722}
3723
3724/**
3725 *      t4_read_rss_key - read the global RSS key
3726 *      @adap: the adapter
3727 *      @key: 10-entry array holding the 320-bit RSS key
3728 *
3729 *      Reads the global 320-bit RSS key.
3730 */
3731void t4_read_rss_key(struct adapter *adap, u32 *key)
3732{
3733        if (adap->flags & FW_OK)
3734                t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
3735        else
3736                t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3737                                 TP_RSS_SECRET_KEY0_A);
3738}
3739
3740/**
3741 *      t4_write_rss_key - program one of the RSS keys
3742 *      @adap: the adapter
3743 *      @key: 10-entry array holding the 320-bit RSS key
3744 *      @idx: which RSS key to write
3745 *
3746 *      Writes one of the RSS keys with the given 320-bit value.  If @idx is
3747 *      0..15 the corresponding entry in the RSS key table is written,
3748 *      otherwise the global RSS key is written.
3749 */
3750void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
3751{
3752        u8 rss_key_addr_cnt = 16;
3753        u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
3754
3755        /* T6 and later: KeyMode 3 (per-VF and per-VF scramble) allows
3756         * access to key addresses 16-63 by using KeyWrAddrX as
3757         * index[5:4] (the upper 2 bits) into the key table.
3758         */
3759        if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
3760            (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
3761                rss_key_addr_cnt = 32;
3762
3763        if (adap->flags & FW_OK)
3764                t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
3765        else
3766                t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3767                                  TP_RSS_SECRET_KEY0_A);
3768
3769        if (idx >= 0 && idx < rss_key_addr_cnt) {
3770                if (rss_key_addr_cnt > 16)
3771                        t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
3772                                     KEYWRADDRX_V(idx >> 4) |
3773                                     T6_VFWRADDR_V(idx) | KEYWREN_F);
3774                else
3775                        t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
3776                                     KEYWRADDR_V(idx) | KEYWREN_F);
3777        }
3778}
3779
3780/**
3781 *      t4_read_rss_pf_config - read PF RSS Configuration Table
3782 *      @adapter: the adapter
3783 *      @index: the entry in the PF RSS table to read
3784 *      @valp: where to store the returned value
3785 *
3786 *      Reads the PF RSS Configuration Table at the specified index and stores
3787 *      the value found there in @valp.
3788 */
3789void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
3790                           u32 *valp)
3791{
3792        if (adapter->flags & FW_OK)
3793                t4_fw_tp_pio_rw(adapter, valp, 1,
3794                                TP_RSS_PF0_CONFIG_A + index, 1);
3795        else
3796                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3797                                 valp, 1, TP_RSS_PF0_CONFIG_A + index);
3798}
3799
3800/**
3801 *      t4_read_rss_vf_config - read VF RSS Configuration Table
3802 *      @adapter: the adapter
3803 *      @index: the entry in the VF RSS table to read
3804 *      @vfl: where to store the returned VFL
3805 *      @vfh: where to store the returned VFH
3806 *
3807 *      Reads the VF RSS Configuration Table at the specified index and stores
3808 *      the (VFL, VFH) values found there in @vfl and @vfh.
3809 */
3810void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3811                           u32 *vfl, u32 *vfh)
3812{
3813        u32 vrt, mask, data;
3814
3815        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
3816                mask = VFWRADDR_V(VFWRADDR_M);
3817                data = VFWRADDR_V(index);
3818        } else {
3819                mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
3820                data = T6_VFWRADDR_V(index);
3821        }
3822
3823        /* Request that the index'th VF Table values be read into VFL/VFH.
3824         */
3825        vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
3826        vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
3827        vrt |= data | VFRDEN_F;
3828        t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
3829
3830        /* Grab the VFL/VFH values ...
3831         */
3832        if (adapter->flags & FW_OK) {
3833                t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
3834                t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
3835        } else {
3836                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3837                                 vfl, 1, TP_RSS_VFL_CONFIG_A);
3838                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3839                                 vfh, 1, TP_RSS_VFH_CONFIG_A);
3840        }
3841}
3842
3843/**
3844 *      t4_read_rss_pf_map - read PF RSS Map
3845 *      @adapter: the adapter
3846 *
3847 *      Reads the PF RSS Map register and returns its value.
3848 */
3849u32 t4_read_rss_pf_map(struct adapter *adapter)
3850{
3851        u32 pfmap;
3852
3853        if (adapter->flags & FW_OK)
3854                t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
3855        else
3856                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3857                                 &pfmap, 1, TP_RSS_PF_MAP_A);
3858        return pfmap;
3859}
3860
3861/**
3862 *      t4_read_rss_pf_mask - read PF RSS Mask
3863 *      @adapter: the adapter
3864 *
3865 *      Reads the PF RSS Mask register and returns its value.
3866 */
3867u32 t4_read_rss_pf_mask(struct adapter *adapter)
3868{
3869        u32 pfmask;
3870
3871        if (adapter->flags & FW_OK)
3872                t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
3873        else
3874                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3875                                 &pfmask, 1, TP_RSS_PF_MSK_A);
3876        return pfmask;
3877}
3878
3879/**
3880 *      t4_tp_get_tcp_stats - read TP's TCP MIB counters
3881 *      @adap: the adapter
3882 *      @v4: holds the TCP/IP counter values
3883 *      @v6: holds the TCP/IPv6 counter values
3884 *
3885 *      Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3886 *      Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3887 */
3888void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3889                         struct tp_tcp_stats *v6)
3890{
3891        u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
3892
3893#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
3894#define STAT(x)     val[STAT_IDX(x)]
3895#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3896
3897        if (v4) {
3898                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3899                                 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
3900                v4->tcp_out_rsts = STAT(OUT_RST);
3901                v4->tcp_in_segs  = STAT64(IN_SEG);
3902                v4->tcp_out_segs = STAT64(OUT_SEG);
3903                v4->tcp_retrans_segs = STAT64(RXT_SEG);
3904        }
3905        if (v6) {
3906                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3907                                 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
3908                v6->tcp_out_rsts = STAT(OUT_RST);
3909                v6->tcp_in_segs  = STAT64(IN_SEG);
3910                v6->tcp_out_segs = STAT64(OUT_SEG);
3911                v6->tcp_retrans_segs = STAT64(RXT_SEG);
3912        }
3913#undef STAT64
3914#undef STAT
3915#undef STAT_IDX
3916}
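
/* Editor's illustrative sketch (not part of the driver): either pointer may be
 * NULL to skip that family, as noted above.  The first call below reads both
 * families, the second reads only the IPv4 counters.
 *
 *      struct tp_tcp_stats v4, v6;
 *
 *      t4_tp_get_tcp_stats(adap, &v4, &v6);
 *      t4_tp_get_tcp_stats(adap, &v4, NULL);
 */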
3917
3918/**
3919 *      t4_tp_get_err_stats - read TP's error MIB counters
3920 *      @adap: the adapter
3921 *      @st: holds the counter values
3922 *
3923 *      Returns the values of TP's error counters.
3924 */
3925void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3926{
3927        /* T6 and later have 2 channels */
3928        if (adap->params.arch.nchan == NCHAN) {
3929                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3930                                 st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
3931                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3932                                 st->tnl_cong_drops, 8,
3933                                 TP_MIB_TNL_CNG_DROP_0_A);
3934                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3935                                 st->tnl_tx_drops, 4,
3936                                 TP_MIB_TNL_DROP_0_A);
3937                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3938                                 st->ofld_vlan_drops, 4,
3939                                 TP_MIB_OFD_VLN_DROP_0_A);
3940                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3941                                 st->tcp6_in_errs, 4,
3942                                 TP_MIB_TCP_V6IN_ERR_0_A);
3943        } else {
3944                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3945                                 st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
3946                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3947                                 st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
3948                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3949                                 st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
3950                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3951                                 st->tnl_cong_drops, 2,
3952                                 TP_MIB_TNL_CNG_DROP_0_A);
3953                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3954                                 st->ofld_chan_drops, 2,
3955                                 TP_MIB_OFD_CHN_DROP_0_A);
3956                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3957                                 st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
3958                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3959                                 st->ofld_vlan_drops, 2,
3960                                 TP_MIB_OFD_VLN_DROP_0_A);
3961                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3962                                 st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
3963        }
3964        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3965                         &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
3966}
3967
3968/**
3969 *      t4_tp_get_cpl_stats - read TP's CPL MIB counters
3970 *      @adap: the adapter
3971 *      @st: holds the counter values
3972 *
3973 *      Returns the values of TP's CPL counters.
3974 */
3975void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3976{
3977        /* T6 and later have 2 channels */
3978        if (adap->params.arch.nchan == NCHAN) {
3979                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
3980                                 8, TP_MIB_CPL_IN_REQ_0_A);
3981        } else {
3982                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
3983                                 2, TP_MIB_CPL_IN_REQ_0_A);
3984                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
3985                                 2, TP_MIB_CPL_OUT_RSP_0_A);
3986        }
3987}
3988
3989/**
3990 *      t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3991 *      @adap: the adapter
3992 *      @st: holds the counter values
3993 *
3994 *      Returns the values of TP's RDMA counters.
3995 */
3996void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3997{
3998        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
3999                         2, TP_MIB_RQE_DFR_PKT_A);
4000}
4001
4002/**
4003 *      t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
4004 *      @adap: the adapter
4005 *      @idx: the port index
4006 *      @st: holds the counter values
4007 *
4008 *      Returns the values of TP's FCoE counters for the selected port.
4009 */
4010void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
4011                       struct tp_fcoe_stats *st)
4012{
4013        u32 val[2];
4014
4015        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
4016                         1, TP_MIB_FCOE_DDP_0_A + idx);
4017        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
4018                         1, TP_MIB_FCOE_DROP_0_A + idx);
4019        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
4020                         2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
4021        st->octets_ddp = ((u64)val[0] << 32) | val[1];
4022}
4023
4024/**
4025 *      t4_get_usm_stats - read TP's non-TCP DDP MIB counters
4026 *      @adap: the adapter
4027 *      @st: holds the counter values
4028 *
4029 *      Returns the values of TP's counters for non-TCP directly-placed packets.
4030 */
4031void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
4032{
4033        u32 val[4];
4034
4035        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
4036                         TP_MIB_USM_PKTS_A);
4037        st->frames = val[0];
4038        st->drops = val[1];
4039        st->octets = ((u64)val[2] << 32) | val[3];
4040}
4041
4042/**
4043 *      t4_read_mtu_tbl - returns the values in the HW path MTU table
4044 *      @adap: the adapter
4045 *      @mtus: where to store the MTU values
4046 *      @mtu_log: where to store the MTU base-2 log (may be %NULL)
4047 *
4048 *      Reads the HW path MTU table.
4049 */
4050void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
4051{
4052        u32 v;
4053        int i;
4054
4055        for (i = 0; i < NMTUS; ++i) {
4056                t4_write_reg(adap, TP_MTU_TABLE_A,
4057                             MTUINDEX_V(0xff) | MTUVALUE_V(i));
4058                v = t4_read_reg(adap, TP_MTU_TABLE_A);
4059                mtus[i] = MTUVALUE_G(v);
4060                if (mtu_log)
4061                        mtu_log[i] = MTUWIDTH_G(v);
4062        }
4063}
4064
4065/**
4066 *      t4_read_cong_tbl - reads the congestion control table
4067 *      @adap: the adapter
4068 *      @incr: where to store the alpha values
4069 *
4070 *      Reads the additive increments programmed into the HW congestion
4071 *      control table.
4072 */
4073void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
4074{
4075        unsigned int mtu, w;
4076
4077        for (mtu = 0; mtu < NMTUS; ++mtu)
4078                for (w = 0; w < NCCTRL_WIN; ++w) {
4079                        t4_write_reg(adap, TP_CCTRL_TABLE_A,
4080                                     ROWINDEX_V(0xffff) | (mtu << 5) | w);
4081                        incr[mtu][w] = (u16)t4_read_reg(adap,
4082                                                TP_CCTRL_TABLE_A) & 0x1fff;
4083                }
4084}
4085
4086/**
4087 *      t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
4088 *      @adap: the adapter
4089 *      @addr: the indirect TP register address
4090 *      @mask: specifies the field within the register to modify
4091 *      @val: new value for the field
4092 *
4093 *      Sets a field of an indirect TP register to the given value.
4094 */
4095void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
4096                            unsigned int mask, unsigned int val)
4097{
4098        t4_write_reg(adap, TP_PIO_ADDR_A, addr);
4099        val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
4100        t4_write_reg(adap, TP_PIO_DATA_A, val);
4101}
4102
4103/**
4104 *      init_cong_ctrl - initialize congestion control parameters
4105 *      @a: the alpha values for congestion control
4106 *      @b: the beta values for congestion control
4107 *
4108 *      Initialize the congestion control parameters.
4109 */
4110static void init_cong_ctrl(unsigned short *a, unsigned short *b)
4111{
4112        a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
4113        a[9] = 2;
4114        a[10] = 3;
4115        a[11] = 4;
4116        a[12] = 5;
4117        a[13] = 6;
4118        a[14] = 7;
4119        a[15] = 8;
4120        a[16] = 9;
4121        a[17] = 10;
4122        a[18] = 14;
4123        a[19] = 17;
4124        a[20] = 21;
4125        a[21] = 25;
4126        a[22] = 30;
4127        a[23] = 35;
4128        a[24] = 45;
4129        a[25] = 60;
4130        a[26] = 80;
4131        a[27] = 100;
4132        a[28] = 200;
4133        a[29] = 300;
4134        a[30] = 400;
4135        a[31] = 500;
4136
4137        b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
4138        b[9] = b[10] = 1;
4139        b[11] = b[12] = 2;
4140        b[13] = b[14] = b[15] = b[16] = 3;
4141        b[17] = b[18] = b[19] = b[20] = b[21] = 4;
4142        b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
4143        b[28] = b[29] = 6;
4144        b[30] = b[31] = 7;
4145}
4146
4147/* The minimum additive increment value for the congestion control table */
4148#define CC_MIN_INCR 2U
4149
4150/**
4151 *      t4_load_mtus - write the MTU and congestion control HW tables
4152 *      @adap: the adapter
4153 *      @mtus: the values for the MTU table
4154 *      @alpha: the values for the congestion control alpha parameter
4155 *      @beta: the values for the congestion control beta parameter
4156 *
4157 *      Write the HW MTU table with the supplied MTUs and the high-speed
4158 *      congestion control table with the supplied alpha, beta, and MTUs.
4159 *      We write the two tables together because the additive increments
4160 *      depend on the MTUs.
4161 */
4162void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
4163                  const unsigned short *alpha, const unsigned short *beta)
4164{
4165        static const unsigned int avg_pkts[NCCTRL_WIN] = {
4166                2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
4167                896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
4168                28672, 40960, 57344, 81920, 114688, 163840, 229376
4169        };
4170
4171        unsigned int i, w;
4172
4173        for (i = 0; i < NMTUS; ++i) {
4174                unsigned int mtu = mtus[i];
4175                unsigned int log2 = fls(mtu);
4176
4177                if (!(mtu & ((1 << log2) >> 2)))     /* round */
4178                        log2--;
4179                t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
4180                             MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
4181
4182                for (w = 0; w < NCCTRL_WIN; ++w) {
4183                        unsigned int inc;
4184
4185                        inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
4186                                  CC_MIN_INCR);
4187
4188                        t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
4189                                     (w << 16) | (beta[w] << 13) | inc);
4190                }
4191        }
4192}
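
/* Worked example (hypothetical values, for illustration only): for an MTU of
 * 1500 with alpha[w] = 1 in the smallest congestion control window
 * (avg_pkts[w] == 2), the additive increment programmed above is
 *
 *     inc = max(((1500 - 40) * 1) / 2, CC_MIN_INCR) = 730
 *
 * so the increment scales with the MTU and never drops below CC_MIN_INCR.
 */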
4193
4194/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
4195 * clocks.  The formula is
4196 *
4197 * bytes/s = bytes256 * 256 * ClkFreq / 4096
4198 *
4199 * which is equivalent to
4200 *
4201 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
4202 */
4203static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
4204{
4205        u64 v = bytes256 * adap->params.vpd.cclk;
4206
4207        return v * 62 + v / 2;
4208}
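
/* Worked example (hypothetical values, for illustration only): with a
 * 250 MHz core clock (cclk == 250000 kHz) and bytes256 == 16,
 *
 *     62.5 * 16 * 250000 = 250,000,000 bytes/s
 *
 * which matches the exact formula 16 * 256 * 250000000 / 4096.
 */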
4209
4210/**
4211 *      t4_get_chan_txrate - get the current per channel Tx rates
4212 *      @adap: the adapter
4213 *      @nic_rate: rates for NIC traffic
4214 *      @ofld_rate: rates for offloaded traffic
4215 *
4216 *      Return the current Tx rates in bytes/s for NIC and offloaded traffic
4217 *      for each channel.
4218 */
4219void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
4220{
4221        u32 v;
4222
4223        v = t4_read_reg(adap, TP_TX_TRATE_A);
4224        nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
4225        nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
4226        if (adap->params.arch.nchan == NCHAN) {
4227                nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
4228                nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
4229        }
4230
4231        v = t4_read_reg(adap, TP_TX_ORATE_A);
4232        ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
4233        ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
4234        if (adap->params.arch.nchan == NCHAN) {
4235                ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
4236                ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
4237        }
4238}
4239
4240/**
4241 *      t4_pmtx_get_stats - returns the HW stats from PMTX
4242 *      @adap: the adapter
4243 *      @cnt: where to store the count statistics
4244 *      @cycles: where to store the cycle statistics
4245 *
4246 *      Returns performance statistics from PMTX.
4247 */
4248void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
4249{
4250        int i;
4251        u32 data[2];
4252
4253        for (i = 0; i < PM_NSTATS; i++) {
4254                t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
4255                cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
4256                if (is_t4(adap->params.chip)) {
4257                        cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
4258                } else {
4259                        t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
4260                                         PM_TX_DBG_DATA_A, data, 2,
4261                                         PM_TX_DBG_STAT_MSB_A);
4262                        cycles[i] = (((u64)data[0] << 32) | data[1]);
4263                }
4264        }
4265}
4266
4267/**
4268 *      t4_pmrx_get_stats - returns the HW stats from PMRX
4269 *      @adap: the adapter
4270 *      @cnt: where to store the count statistics
4271 *      @cycles: where to store the cycle statistics
4272 *
4273 *      Returns performance statistics from PMRX.
4274 */
4275void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
4276{
4277        int i;
4278        u32 data[2];
4279
4280        for (i = 0; i < PM_NSTATS; i++) {
4281                t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
4282                cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
4283                if (is_t4(adap->params.chip)) {
4284                        cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
4285                } else {
4286                        t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
4287                                         PM_RX_DBG_DATA_A, data, 2,
4288                                         PM_RX_DBG_STAT_MSB_A);
4289                        cycles[i] = (((u64)data[0] << 32) | data[1]);
4290                }
4291        }
4292}
4293
4294/**
4295 *      t4_get_mps_bg_map - return the buffer groups associated with a port
4296 *      @adap: the adapter
4297 *      @idx: the port index
4298 *
4299 *      Returns a bitmap indicating which MPS buffer groups are associated
4300 *      with the given port.  Bit i is set if buffer group i is used by the
4301 *      port.
4302 */
4303unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
4304{
4305        u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
4306
4307        if (n == 0)
4308                return idx == 0 ? 0xf : 0;
4309        if (n == 1)
4310                return idx < 2 ? (3 << (2 * idx)) : 0;
4311        return 1 << idx;
4312}
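
/* For example, per the mapping above: a single-port configuration gives port
 * 0 all four buffer groups (0xf); a two-port configuration gives port 0
 * groups {0, 1} (0x3) and port 1 groups {2, 3} (0xc); a four-port
 * configuration gives each port exactly one buffer group (1 << idx).
 */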
4313
4314/**
4315 *      t4_get_port_type_description - return Port Type string description
4316 *      @port_type: firmware Port Type enumeration
4317 */
4318const char *t4_get_port_type_description(enum fw_port_type port_type)
4319{
4320        static const char *const port_type_description[] = {
4321                "R XFI",
4322                "R XAUI",
4323                "T SGMII",
4324                "T XFI",
4325                "T XAUI",
4326                "KX4",
4327                "CX4",
4328                "KX",
4329                "KR",
4330                "R SFP+",
4331                "KR/KX",
4332                "KR/KX/KX4",
4333                "R QSFP_10G",
4334                "R QSA",
4335                "R QSFP",
4336                "R BP40_BA",
4337        };
4338
4339        if (port_type < ARRAY_SIZE(port_type_description))
4340                return port_type_description[port_type];
4341        return "UNKNOWN";
4342}
4343
4344/**
4345 *      t4_get_port_stats_offset - collect port stats relative to a previous
4346 *                                 snapshot
4347 *      @adap: The adapter
4348 *      @idx: The port
4349 *      @stats: Current stats to fill
4350 *      @offset: Previous stats snapshot
4351 */
4352void t4_get_port_stats_offset(struct adapter *adap, int idx,
4353                              struct port_stats *stats,
4354                              struct port_stats *offset)
4355{
4356        u64 *s, *o;
4357        int i;
4358
4359        t4_get_port_stats(adap, idx, stats);
4360        for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
4361                        i < (sizeof(struct port_stats) / sizeof(u64));
4362                        i++, s++, o++)
4363                *s -= *o;
4364}
4365
4366/**
4367 *      t4_get_port_stats - collect port statistics
4368 *      @adap: the adapter
4369 *      @idx: the port index
4370 *      @p: the stats structure to fill
4371 *
4372 *      Collect statistics related to the given port from HW.
4373 */
4374void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
4375{
4376        u32 bgmap = t4_get_mps_bg_map(adap, idx);
4377
4378#define GET_STAT(name) \
4379        t4_read_reg64(adap, \
4380        (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
4381        T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
4382#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
4383
4384        p->tx_octets           = GET_STAT(TX_PORT_BYTES);
4385        p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
4386        p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
4387        p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
4388        p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
4389        p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
4390        p->tx_frames_64        = GET_STAT(TX_PORT_64B);
4391        p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
4392        p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
4393        p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
4394        p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
4395        p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
4396        p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
4397        p->tx_drop             = GET_STAT(TX_PORT_DROP);
4398        p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
4399        p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
4400        p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
4401        p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
4402        p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
4403        p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
4404        p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
4405        p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
4406        p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
4407
4408        p->rx_octets           = GET_STAT(RX_PORT_BYTES);
4409        p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
4410        p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
4411        p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
4412        p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
4413        p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
4414        p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
4415        p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
4416        p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
4417        p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
4418        p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
4419        p->rx_frames_64        = GET_STAT(RX_PORT_64B);
4420        p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
4421        p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
4422        p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
4423        p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
4424        p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
4425        p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
4426        p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
4427        p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
4428        p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
4429        p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
4430        p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
4431        p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
4432        p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
4433        p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
4434        p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
4435
4436        p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
4437        p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
4438        p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
4439        p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
4440        p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
4441        p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
4442        p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
4443        p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
4444
4445#undef GET_STAT
4446#undef GET_STAT_COM
4447}
4448
4449/**
4450 *      t4_get_lb_stats - collect loopback port statistics
4451 *      @adap: the adapter
4452 *      @idx: the loopback port index
4453 *      @p: the stats structure to fill
4454 *
4455 *      Return HW statistics for the given loopback port.
4456 */
4457void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
4458{
4459        u32 bgmap = t4_get_mps_bg_map(adap, idx);
4460
4461#define GET_STAT(name) \
4462        t4_read_reg64(adap, \
4463        (is_t4(adap->params.chip) ? \
4464        PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
4465        T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
4466#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
4467
4468        p->octets           = GET_STAT(BYTES);
4469        p->frames           = GET_STAT(FRAMES);
4470        p->bcast_frames     = GET_STAT(BCAST);
4471        p->mcast_frames     = GET_STAT(MCAST);
4472        p->ucast_frames     = GET_STAT(UCAST);
4473        p->error_frames     = GET_STAT(ERROR);
4474
4475        p->frames_64        = GET_STAT(64B);
4476        p->frames_65_127    = GET_STAT(65B_127B);
4477        p->frames_128_255   = GET_STAT(128B_255B);
4478        p->frames_256_511   = GET_STAT(256B_511B);
4479        p->frames_512_1023  = GET_STAT(512B_1023B);
4480        p->frames_1024_1518 = GET_STAT(1024B_1518B);
4481        p->frames_1519_max  = GET_STAT(1519B_MAX);
4482        p->drop             = GET_STAT(DROP_FRAMES);
4483
4484        p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
4485        p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
4486        p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4487        p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4488        p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4489        p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4490        p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4491        p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4492
4493#undef GET_STAT
4494#undef GET_STAT_COM
4495}
4496
4497/**
 *     t4_mk_filtdelwr - create a delete filter WR
4498 *     @ftid: the filter ID
4499 *     @wr: the filter work request to populate
4500 *     @qid: ingress queue to receive the delete notification
4501 *
4502 *     Creates a filter work request to delete the supplied filter.  If @qid is
4503 *     negative the delete notification is suppressed.
4504 */
4505void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4506{
4507        memset(wr, 0, sizeof(*wr));
4508        wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
4509        wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
4510        wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
4511                                    FW_FILTER_WR_NOREPLY_V(qid < 0));
4512        wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
4513        if (qid >= 0)
4514                wr->rx_chan_rx_rpl_iq =
4515                        cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
4516}
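
/* Usage sketch (illustrative only; "ftid", "fwr" and "rspq_id" are
 * placeholder names, not values defined by this driver):
 *
 *     t4_mk_filtdelwr(ftid, fwr, rspq_id);    (notify rspq_id on completion)
 *     t4_mk_filtdelwr(ftid, fwr, -1);         (suppress the notification)
 */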
4517
4518#define INIT_CMD(var, cmd, rd_wr) do { \
4519        (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
4520                                        FW_CMD_REQUEST_F | \
4521                                        FW_CMD_##rd_wr##_F); \
4522        (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
4523} while (0)
4524
4525int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
4526                          u32 addr, u32 val)
4527{
4528        u32 ldst_addrspace;
4529        struct fw_ldst_cmd c;
4530
4531        memset(&c, 0, sizeof(c));
4532        ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
4533        c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4534                                        FW_CMD_REQUEST_F |
4535                                        FW_CMD_WRITE_F |
4536                                        ldst_addrspace);
4537        c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4538        c.u.addrval.addr = cpu_to_be32(addr);
4539        c.u.addrval.val = cpu_to_be32(val);
4540
4541        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4542}
4543
4544/**
4545 *      t4_mdio_rd - read a PHY register through MDIO
4546 *      @adap: the adapter
4547 *      @mbox: mailbox to use for the FW command
4548 *      @phy_addr: the PHY address
4549 *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4550 *      @reg: the register to read
4551 *      @valp: where to store the value
4552 *
4553 *      Issues a FW command through the given mailbox to read a PHY register.
4554 */
4555int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4556               unsigned int mmd, unsigned int reg, u16 *valp)
4557{
4558        int ret;
4559        u32 ldst_addrspace;
4560        struct fw_ldst_cmd c;
4561
4562        memset(&c, 0, sizeof(c));
4563        ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
4564        c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4565                                        FW_CMD_REQUEST_F | FW_CMD_READ_F |
4566                                        ldst_addrspace);
4567        c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4568        c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
4569                                         FW_LDST_CMD_MMD_V(mmd));
4570        c.u.mdio.raddr = cpu_to_be16(reg);
4571
4572        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4573        if (ret == 0)
4574                *valp = be16_to_cpu(c.u.mdio.rval);
4575        return ret;
4576}
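
/* Usage sketch (illustrative only; "mbox" and "phy_addr" are placeholders):
 * read the clause 45 PMA/PMD status register (MMD 1, register 1) of a PHY:
 *
 *     u16 stat;
 *     int ret = t4_mdio_rd(adap, mbox, phy_addr, 1, 1, &stat);
 *     if (!ret)
 *             pr_info("PMA/PMD status: %#x\n", stat);
 */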
4577
4578/**
4579 *      t4_mdio_wr - write a PHY register through MDIO
4580 *      @adap: the adapter
4581 *      @mbox: mailbox to use for the FW command
4582 *      @phy_addr: the PHY address
4583 *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4584 *      @reg: the register to write
4585 *      @valp: value to write
4586 *
4587 *      Issues a FW command through the given mailbox to write a PHY register.
4588 */
4589int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4590               unsigned int mmd, unsigned int reg, u16 val)
4591{
4592        u32 ldst_addrspace;
4593        struct fw_ldst_cmd c;
4594
4595        memset(&c, 0, sizeof(c));
4596        ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
4597        c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4598                                        FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4599                                        ldst_addrspace);
4600        c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4601        c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
4602                                         FW_LDST_CMD_MMD_V(mmd));
4603        c.u.mdio.raddr = cpu_to_be16(reg);
4604        c.u.mdio.rval = cpu_to_be16(val);
4605
4606        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4607}
4608
4609/**
4610 *      t4_sge_decode_idma_state - decode the IDMA state
4611 *      @adapter: the adapter
4612 *      @state: the state IDMA is stuck in
4613 */
4614void t4_sge_decode_idma_state(struct adapter *adapter, int state)
4615{
4616        static const char * const t4_decode[] = {
4617                "IDMA_IDLE",
4618                "IDMA_PUSH_MORE_CPL_FIFO",
4619                "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
4620                "Not used",
4621                "IDMA_PHYSADDR_SEND_PCIEHDR",
4622                "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
4623                "IDMA_PHYSADDR_SEND_PAYLOAD",
4624                "IDMA_SEND_FIFO_TO_IMSG",
4625                "IDMA_FL_REQ_DATA_FL_PREP",
4626                "IDMA_FL_REQ_DATA_FL",
4627                "IDMA_FL_DROP",
4628                "IDMA_FL_H_REQ_HEADER_FL",
4629                "IDMA_FL_H_SEND_PCIEHDR",
4630                "IDMA_FL_H_PUSH_CPL_FIFO",
4631                "IDMA_FL_H_SEND_CPL",
4632                "IDMA_FL_H_SEND_IP_HDR_FIRST",
4633                "IDMA_FL_H_SEND_IP_HDR",
4634                "IDMA_FL_H_REQ_NEXT_HEADER_FL",
4635                "IDMA_FL_H_SEND_NEXT_PCIEHDR",
4636                "IDMA_FL_H_SEND_IP_HDR_PADDING",
4637                "IDMA_FL_D_SEND_PCIEHDR",
4638                "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
4639                "IDMA_FL_D_REQ_NEXT_DATA_FL",
4640                "IDMA_FL_SEND_PCIEHDR",
4641                "IDMA_FL_PUSH_CPL_FIFO",
4642                "IDMA_FL_SEND_CPL",
4643                "IDMA_FL_SEND_PAYLOAD_FIRST",
4644                "IDMA_FL_SEND_PAYLOAD",
4645                "IDMA_FL_REQ_NEXT_DATA_FL",
4646                "IDMA_FL_SEND_NEXT_PCIEHDR",
4647                "IDMA_FL_SEND_PADDING",
4648                "IDMA_FL_SEND_COMPLETION_TO_IMSG",
4649                "IDMA_FL_SEND_FIFO_TO_IMSG",
4650                "IDMA_FL_REQ_DATAFL_DONE",
4651                "IDMA_FL_REQ_HEADERFL_DONE",
4652        };
4653        static const char * const t5_decode[] = {
4654                "IDMA_IDLE",
4655                "IDMA_ALMOST_IDLE",
4656                "IDMA_PUSH_MORE_CPL_FIFO",
4657                "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
4658                "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
4659                "IDMA_PHYSADDR_SEND_PCIEHDR",
4660                "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
4661                "IDMA_PHYSADDR_SEND_PAYLOAD",
4662                "IDMA_SEND_FIFO_TO_IMSG",
4663                "IDMA_FL_REQ_DATA_FL",
4664                "IDMA_FL_DROP",
4665                "IDMA_FL_DROP_SEND_INC",
4666                "IDMA_FL_H_REQ_HEADER_FL",
4667                "IDMA_FL_H_SEND_PCIEHDR",
4668                "IDMA_FL_H_PUSH_CPL_FIFO",
4669                "IDMA_FL_H_SEND_CPL",
4670                "IDMA_FL_H_SEND_IP_HDR_FIRST",
4671                "IDMA_FL_H_SEND_IP_HDR",
4672                "IDMA_FL_H_REQ_NEXT_HEADER_FL",
4673                "IDMA_FL_H_SEND_NEXT_PCIEHDR",
4674                "IDMA_FL_H_SEND_IP_HDR_PADDING",
4675                "IDMA_FL_D_SEND_PCIEHDR",
4676                "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
4677                "IDMA_FL_D_REQ_NEXT_DATA_FL",
4678                "IDMA_FL_SEND_PCIEHDR",
4679                "IDMA_FL_PUSH_CPL_FIFO",
4680                "IDMA_FL_SEND_CPL",
4681                "IDMA_FL_SEND_PAYLOAD_FIRST",
4682                "IDMA_FL_SEND_PAYLOAD",
4683                "IDMA_FL_REQ_NEXT_DATA_FL",
4684                "IDMA_FL_SEND_NEXT_PCIEHDR",
4685                "IDMA_FL_SEND_PADDING",
4686                "IDMA_FL_SEND_COMPLETION_TO_IMSG",
4687        };
4688        static const u32 sge_regs[] = {
4689                SGE_DEBUG_DATA_LOW_INDEX_2_A,
4690                SGE_DEBUG_DATA_LOW_INDEX_3_A,
4691                SGE_DEBUG_DATA_HIGH_INDEX_10_A,
4692        };
4693        const char **sge_idma_decode;
4694        int sge_idma_decode_nstates;
4695        int i;
4696
4697        if (is_t4(adapter->params.chip)) {
4698                sge_idma_decode = (const char **)t4_decode;
4699                sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
4700        } else {
4701                sge_idma_decode = (const char **)t5_decode;
4702                sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
4703        }
4704
4705        if (state < sge_idma_decode_nstates)
4706                CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
4707        else
4708                CH_WARN(adapter, "idma state %d unknown\n", state);
4709
4710        for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
4711                CH_WARN(adapter, "SGE register %#x value %#x\n",
4712                        sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
4713}
4714
4715/**
4716 *      t4_sge_ctxt_flush - flush the SGE context cache
4717 *      @adap: the adapter
4718 *      @mbox: mailbox to use for the FW command
4719 *
4720 *      Issues a FW command through the given mailbox to flush the
4721 *      SGE context cache.
4722 */
4723int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4724{
4725        int ret;
4726        u32 ldst_addrspace;
4727        struct fw_ldst_cmd c;
4728
4729        memset(&c, 0, sizeof(c));
4730        ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
4731        c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4732                                        FW_CMD_REQUEST_F | FW_CMD_READ_F |
4733                                        ldst_addrspace);
4734        c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4735        c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
4736
4737        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4738        return ret;
4739}
4740
4741/**
4742 *      t4_fw_hello - establish communication with FW
4743 *      @adap: the adapter
4744 *      @mbox: mailbox to use for the FW command
4745 *      @evt_mbox: mailbox to receive async FW events
4746 *      @master: specifies the caller's willingness to be the device master
4747 *      @state: returns the current device state (if non-NULL)
4748 *
4749 *      Issues a command to establish communication with FW.  Returns either
4750 *      an error (negative integer) or the mailbox of the Master PF.
4751 */
4752int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4753                enum dev_master master, enum dev_state *state)
4754{
4755        int ret;
4756        struct fw_hello_cmd c;
4757        u32 v;
4758        unsigned int master_mbox;
4759        int retries = FW_CMD_HELLO_RETRIES;
4760
4761retry:
4762        memset(&c, 0, sizeof(c));
4763        INIT_CMD(c, HELLO, WRITE);
4764        c.err_to_clearinit = cpu_to_be32(
4765                FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
4766                FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
4767                FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
4768                                        mbox : FW_HELLO_CMD_MBMASTER_M) |
4769                FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
4770                FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
4771                FW_HELLO_CMD_CLEARINIT_F);
4772
4773        /*
4774         * Issue the HELLO command to the firmware.  If it's not successful
4775         * but indicates that we got a "busy" or "timeout" condition, retry
4776         * the HELLO until we exhaust our retry limit.  If we do exceed our
4777         * retry limit, check to see if the firmware left us any error
4778         * information and report that if so.
4779         */
4780        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4781        if (ret < 0) {
4782                if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4783                        goto retry;
4784                if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
4785                        t4_report_fw_error(adap);
4786                return ret;
4787        }
4788
4789        v = be32_to_cpu(c.err_to_clearinit);
4790        master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
4791        if (state) {
4792                if (v & FW_HELLO_CMD_ERR_F)
4793                        *state = DEV_STATE_ERR;
4794                else if (v & FW_HELLO_CMD_INIT_F)
4795                        *state = DEV_STATE_INIT;
4796                else
4797                        *state = DEV_STATE_UNINIT;
4798        }
4799
4800        /*
4801         * If we're not the Master PF then we need to wait around for the
4802         * Master PF Driver to finish setting up the adapter.
4803         *
4804         * Note that we also do this wait if we're a non-Master-capable PF and
4805         * there is no current Master PF; a Master PF may show up momentarily
4806         * and we wouldn't want to fail pointlessly.  (This can happen when an
4807         * OS loads lots of different drivers rapidly at the same time).  In
4808         * this case, the Master PF returned by the firmware will be
4809         * PCIE_FW_MASTER_M so the test below will work ...
4810         */
4811        if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
4812            master_mbox != mbox) {
4813                int waiting = FW_CMD_HELLO_TIMEOUT;
4814
4815                /*
4816                 * Wait for the firmware to either indicate an error or
4817                 * initialized state.  If we see either of these we bail out
4818                 * and report the issue to the caller.  If we exhaust the
4819                 * "hello timeout" and we haven't exhausted our retries, try
4820                 * again.  Otherwise bail with a timeout error.
4821                 */
4822                for (;;) {
4823                        u32 pcie_fw;
4824
4825                        msleep(50);
4826                        waiting -= 50;
4827
4828                        /*
4829                         * If neither Error nor Initialized is indicated
4830                         * by the firmware, keep waiting till we exhaust our
4831                         * timeout ... and then retry if we haven't exhausted
4832                         * our retries ...
4833                         */
4834                        pcie_fw = t4_read_reg(adap, PCIE_FW_A);
4835                        if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
4836                                if (waiting <= 0) {
4837                                        if (retries-- > 0)
4838                                                goto retry;
4839
4840                                        return -ETIMEDOUT;
4841                                }
4842                                continue;
4843                        }
4844
4845                        /*
4846                         * We either have an Error or Initialized condition;
4847                         * report errors preferentially.
4848                         */
4849                        if (state) {
4850                                if (pcie_fw & PCIE_FW_ERR_F)
4851                                        *state = DEV_STATE_ERR;
4852                                else if (pcie_fw & PCIE_FW_INIT_F)
4853                                        *state = DEV_STATE_INIT;
4854                        }
4855
4856                        /*
4857                         * If we arrived before a Master PF was selected and
4858                         * one has since become valid, grab its identity
4859                         * for our caller.
4860                         */
4861                        if (master_mbox == PCIE_FW_MASTER_M &&
4862                            (pcie_fw & PCIE_FW_MASTER_VLD_F))
4863                                master_mbox = PCIE_FW_MASTER_G(pcie_fw);
4864                        break;
4865                }
4866        }
4867
4868        return master_mbox;
4869}
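
/* Usage sketch (illustrative only; "adap->mbox" stands in for the caller's
 * mailbox): a PF driver typically issues HELLO at probe time, offering to be
 * Master and capturing the device state:
 *
 *     enum dev_state state;
 *     int master_mbox = t4_fw_hello(adap, adap->mbox, adap->mbox,
 *                                   MASTER_MAY, &state);
 *     if (master_mbox < 0)
 *             return master_mbox;
 */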
4870
4871/**
4872 *      t4_fw_bye - end communication with FW
4873 *      @adap: the adapter
4874 *      @mbox: mailbox to use for the FW command
4875 *
4876 *      Issues a command to terminate communication with FW.
4877 */
4878int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4879{
4880        struct fw_bye_cmd c;
4881
4882        memset(&c, 0, sizeof(c));
4883        INIT_CMD(c, BYE, WRITE);
4884        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4885}
4886
4887/**
4888 *      t4_early_init - ask FW to initialize the device
4889 *      @adap: the adapter
4890 *      @mbox: mailbox to use for the FW command
4891 *
4892 *      Issues a command to FW to partially initialize the device.  This
4893 *      performs initialization that generally doesn't depend on user input.
4894 */
4895int t4_early_init(struct adapter *adap, unsigned int mbox)
4896{
4897        struct fw_initialize_cmd c;
4898
4899        memset(&c, 0, sizeof(c));
4900        INIT_CMD(c, INITIALIZE, WRITE);
4901        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4902}
4903
4904/**
4905 *      t4_fw_reset - issue a reset to FW
4906 *      @adap: the adapter
4907 *      @mbox: mailbox to use for the FW command
4908 *      @reset: specifies the type of reset to perform
4909 *
4910 *      Issues a reset command of the specified type to FW.
4911 */
4912int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4913{
4914        struct fw_reset_cmd c;
4915
4916        memset(&c, 0, sizeof(c));
4917        INIT_CMD(c, RESET, WRITE);
4918        c.val = cpu_to_be32(reset);
4919        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4920}
4921
4922/**
4923 *      t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4924 *      @adap: the adapter
4925 *      @mbox: mailbox to use for the FW RESET command (if desired)
4926 *      @force: force uP into RESET even if FW RESET command fails
4927 *
4928 *      Issues a RESET command to firmware (if desired) with a HALT indication
4929 *      and then puts the microprocessor into RESET state.  The RESET command
4930 *      will only be issued if a legitimate mailbox is provided (mbox <=
4931 *      PCIE_FW_MASTER_M).
4932 *
4933 *      This is generally used in order for the host to safely manipulate the
4934 *      adapter without fear of conflicting with whatever the firmware might
4935 *      be doing.  The only way out of this state is to RESTART the firmware
4936 *      ...
4937 */
4938static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4939{
4940        int ret = 0;
4941
4942        /*
4943         * If a legitimate mailbox is provided, issue a RESET command
4944         * with a HALT indication.
4945         */
4946        if (mbox <= PCIE_FW_MASTER_M) {
4947                struct fw_reset_cmd c;
4948
4949                memset(&c, 0, sizeof(c));
4950                INIT_CMD(c, RESET, WRITE);
4951                c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
4952                c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
4953                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4954        }
4955
4956        /*
4957         * Normally we won't complete the operation if the firmware RESET
4958         * command fails but if our caller insists we'll go ahead and put the
4959         * uP into RESET.  This can be useful if the firmware is hung or even
4960         * missing ...  We'll have to take the risk of putting the uP into
4961         * RESET without the cooperation of firmware in that case.
4962         *
4963         * We also force the firmware's HALT flag to be on in case we bypassed
4964         * the firmware RESET command above or we're dealing with old firmware
4965         * which doesn't have the HALT capability.  This will serve as a flag
4966         * for the incoming firmware to know that it's coming out of a HALT
4967         * rather than a RESET ... if it's new enough to understand that ...
4968         */
4969        if (ret == 0 || force) {
4970                t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
4971                t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
4972                                 PCIE_FW_HALT_F);
4973        }
4974
4975        /*
4976         * And we always return the result of the firmware RESET command
4977         * even when we force the uP into RESET ...
4978         */
4979        return ret;
4980}
4981
4982/**
4983 *      t4_fw_restart - restart the firmware by taking the uP out of RESET
4984 *      @adap: the adapter
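 *      @mbox: mailbox to use for the FW RESET command (if desired)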
4985 *      @reset: if we want to do a RESET to restart things
4986 *
4987 *      Restart firmware previously halted by t4_fw_halt().  On successful
4988 *      return the previous PF Master remains as the new PF Master and there
4989 *      is no need to issue a new HELLO command, etc.
4990 *
4991 *      We do this in two ways:
4992 *
4993 *       1. If we're dealing with newer firmware we'll simply want to take
4994 *          the chip's microprocessor out of RESET.  This will cause the
4995 *          firmware to start up from its start vector.  And then we'll loop
4996 *          until the firmware indicates it's started again (PCIE_FW.HALT
4997 *          reset to 0) or we timeout.
4998 *
4999 *       2. If we're dealing with older firmware then we'll need to RESET
5000 *          the chip since older firmware won't recognize the PCIE_FW.HALT
5001 *          flag and automatically RESET itself on startup.
5002 */
5003static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
5004{
5005        if (reset) {
5006                /*
5007                 * Since we're directing the RESET instead of the firmware
5008                 * doing it automatically, we need to clear the PCIE_FW.HALT
5009                 * bit.
5010                 */
5011                t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
5012
5013                /*
5014                 * If we've been given a valid mailbox, first try to get the
5015                 * firmware to do the RESET.  If that works, great and we can
5016                 * return success.  Otherwise, if we haven't been given a
5017                 * valid mailbox or the RESET command failed, fall back to
5018                 * hitting the chip with a hammer.
5019                 */
5020                if (mbox <= PCIE_FW_MASTER_M) {
5021                        t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
5022                        msleep(100);
5023                        if (t4_fw_reset(adap, mbox,
5024                                        PIORST_F | PIORSTMODE_F) == 0)
5025                                return 0;
5026                }
5027
5028                t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
5029                msleep(2000);
5030        } else {
5031                int ms;
5032
5033                t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
5034                for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
5035                        if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
5036                                return 0;
5037                        msleep(100);
5038                        ms += 100;
5039                }
5040                return -ETIMEDOUT;
5041        }
5042        return 0;
5043}
5044
5045/**
5046 *      t4_fw_upgrade - perform all of the steps necessary to upgrade FW
5047 *      @adap: the adapter
5048 *      @mbox: mailbox to use for the FW RESET command (if desired)
5049 *      @fw_data: the firmware image to write
5050 *      @size: image size
5051 *      @force: force upgrade even if firmware doesn't cooperate
5052 *
5053 *      Perform all of the steps necessary for upgrading an adapter's
5054 *      firmware image.  Normally this requires the cooperation of the
5055 *      existing firmware in order to halt all existing activities
5056 *      but if an invalid mailbox token is passed in we skip that step
5057 *      (though we'll still put the adapter microprocessor into RESET in
5058 *      that case).
5059 *
5060 *      On successful return the new firmware will have been loaded and
5061 *      the adapter will have been fully RESET losing all previous setup
5062 *      state.  On unsuccessful return the adapter may be completely hosed ...
5063 *      positive errno indicates that the adapter is ~probably~ intact, a
5064 *      negative errno indicates that things are looking bad ...
5065 */
5066int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
5067                  const u8 *fw_data, unsigned int size, int force)
5068{
5069        const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
5070        int reset, ret;
5071
5072        if (!t4_fw_matches_chip(adap, fw_hdr))
5073                return -EINVAL;
5074
5075        ret = t4_fw_halt(adap, mbox, force);
5076        if (ret < 0 && !force)
5077                return ret;
5078
5079        ret = t4_load_fw(adap, fw_data, size);
5080        if (ret < 0)
5081                return ret;
5082
5083        /*
5084         * Older versions of the firmware don't understand the new
5085         * PCIE_FW.HALT flag and so won't know to perform a RESET when they
5086         * restart.  So for newly loaded older firmware we'll have to do the
5087         * RESET for it so it starts up on a clean slate.  We can tell if
5088         * the newly loaded firmware will handle this right by checking
5089         * its header flags to see if it advertises the capability.
5090         */
5091        reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
5092        return t4_fw_restart(adap, mbox, reset);
5093}
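
/* Usage sketch (illustrative only; "fw" stands in for a firmware image
 * obtained elsewhere, e.g. via request_firmware()): perform a cooperative
 * upgrade through the caller's mailbox; passing force = 1 instead would
 * proceed even if the running firmware refuses to halt:
 *
 *     ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 0);
 */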
5094
5095/**
5096 *      t4_fixup_host_params - fix up host-dependent parameters
5097 *      @adap: the adapter
5098 *      @page_size: the host's Base Page Size
5099 *      @cache_line_size: the host's Cache Line Size
5100 *
5101 *      Various registers in T4 contain values which are dependent on the
5102 *      host's Base Page and Cache Line Sizes.  This function will fix all of
5103 *      those registers with the appropriate values as passed in ...
5104 */
5105int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
5106                         unsigned int cache_line_size)
5107{
5108        unsigned int page_shift = fls(page_size) - 1;
5109        unsigned int sge_hps = page_shift - 10;
5110        unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
5111        unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
5112        unsigned int fl_align_log = fls(fl_align) - 1;
5113
5114        t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
5115                     HOSTPAGESIZEPF0_V(sge_hps) |
5116                     HOSTPAGESIZEPF1_V(sge_hps) |
5117                     HOSTPAGESIZEPF2_V(sge_hps) |
5118                     HOSTPAGESIZEPF3_V(sge_hps) |
5119                     HOSTPAGESIZEPF4_V(sge_hps) |
5120                     HOSTPAGESIZEPF5_V(sge_hps) |
5121                     HOSTPAGESIZEPF6_V(sge_hps) |
5122                     HOSTPAGESIZEPF7_V(sge_hps));
5123
5124        if (is_t4(adap->params.chip)) {
5125                t4_set_reg_field(adap, SGE_CONTROL_A,
5126                                 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
5127                                 EGRSTATUSPAGESIZE_F,
5128                                 INGPADBOUNDARY_V(fl_align_log -
5129                                                  INGPADBOUNDARY_SHIFT_X) |
5130                                 EGRSTATUSPAGESIZE_V(stat_len != 64));
5131        } else {
5132                /* T5 introduced the separation of the Free List Padding and
5133                 * Packing Boundaries.  Thus, we can select a smaller Padding
5134                 * Boundary to avoid uselessly chewing up PCIe Link and Memory
5135                 * Bandwidth, and use a Packing Boundary which is large enough
5136                 * to avoid false sharing between CPUs, etc.
5137                 *
5138                 * For the PCI Link, the smaller the Padding Boundary the
5139                 * better.  For the Memory Controller, a smaller Padding
5140                 * Boundary is better until we cross under the Memory Line
5141                 * Size (the minimum unit of transfer to/from Memory).  If we
5142                 * have a Padding Boundary which is smaller than the Memory
5143                 * Line Size, that'll involve a Read-Modify-Write cycle on the
5144                 * Memory Controller which is never good.  For T5 the smallest
5145                 * Padding Boundary which we can select is 32 bytes which is
5146                 * larger than any known Memory Controller Line Size so we'll
5147                 * use that.
5148                 *
5149                 * T5 has a different interpretation of the "0" value for the
5150                 * Packing Boundary.  This corresponds to 16 bytes instead of
5151                 * the expected 32 bytes.  We never have a Packing Boundary
5152                 * less than 32 bytes so we can't use that special value but
5153                 * on the other hand, if we wanted 32 bytes, the best we can
5154                 * really do is 64 bytes.
5155                 */
5156                if (fl_align <= 32) {
5157                        fl_align = 64;
5158                        fl_align_log = 6;
5159                }
5160                t4_set_reg_field(adap, SGE_CONTROL_A,
5161                                 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
5162                                 EGRSTATUSPAGESIZE_F,
5163                                 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
5164                                 EGRSTATUSPAGESIZE_V(stat_len != 64));
5165                t4_set_reg_field(adap, SGE_CONTROL2_A,
5166                                 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
5167                                 INGPACKBOUNDARY_V(fl_align_log -
5168                                                   INGPACKBOUNDARY_SHIFT_X));
5169        }
5170        /*
5171         * Adjust various SGE Free List Host Buffer Sizes.
5172         *
5173         * This is something of a crock since we're using fixed indices into
5174         * the array which are also known by the sge.c code and the T4
5175         * Firmware Configuration File.  We need to come up with a much better
5176         * approach to managing this array.  For now, the first four entries
5177         * are:
5178         *
5179         *   0: Host Page Size
5180         *   1: 64KB
5181         *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
5182         *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
5183         *
5184         * For the single-MTU buffers in unpacked mode we need to include
5185         * space for the SGE Control Packet Shift, 14 byte Ethernet header,
5186         * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
5187         * Padding boundary.  All of these are accommodated in the Factory
5188         * Default Firmware Configuration File but we need to adjust it for
5189         * this host's cache line size.
5190         */
5191        t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
5192        t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
5193                     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
5194                     & ~(fl_align-1));
5195        t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
5196                     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
5197                     & ~(fl_align-1));
5198
5199        t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
5200
5201        return 0;
5202}
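
/* Usage sketch (a minimal example, assuming the host parameters come from the
 * kernel's own PAGE_SIZE and L1_CACHE_BYTES macros):
 *
 *     ret = t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
 */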
5203
5204/**
5205 *      t4_fw_initialize - ask FW to initialize the device
5206 *      @adap: the adapter
5207 *      @mbox: mailbox to use for the FW command
5208 *
5209 *      Issues a command to FW to partially initialize the device.  This
5210 *      performs initialization that generally doesn't depend on user input.
5211 */
5212int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
5213{
5214        struct fw_initialize_cmd c;
5215
5216        memset(&c, 0, sizeof(c));
5217        INIT_CMD(c, INITIALIZE, WRITE);
5218        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5219}
5220
5221/**
5222 *      t4_query_params_rw - query FW or device parameters
5223 *      @adap: the adapter
5224 *      @mbox: mailbox to use for the FW command
5225 *      @pf: the PF
5226 *      @vf: the VF
5227 *      @nparams: the number of parameters
5228 *      @params: the parameter names
5229 *      @val: the parameter values
5230 *      @rw: if set, the values in @val are sent to the FW with the query
5231 *
5232 *      Reads the value of FW or device parameters.  Up to 7 parameters can be
5233 *      queried at once.
5234 */
5235int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
5236                       unsigned int vf, unsigned int nparams, const u32 *params,
5237                       u32 *val, int rw)
5238{
5239        int i, ret;
5240        struct fw_params_cmd c;
5241        __be32 *p = &c.param[0].mnem;
5242
5243        if (nparams > 7)
5244                return -EINVAL;
5245
5246        memset(&c, 0, sizeof(c));
5247        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
5248                                  FW_CMD_REQUEST_F | FW_CMD_READ_F |
5249                                  FW_PARAMS_CMD_PFN_V(pf) |
5250                                  FW_PARAMS_CMD_VFN_V(vf));
5251        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5252
5253        for (i = 0; i < nparams; i++) {
5254                *p++ = cpu_to_be32(*params++);
5255                if (rw)
5256                        *p = cpu_to_be32(*(val + i));
5257                p++;
5258        }
5259
5260        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5261        if (ret == 0)
5262                for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
5263                        *val++ = be32_to_cpu(*p);
5264        return ret;
5265}
5266
5267int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
5268                    unsigned int vf, unsigned int nparams, const u32 *params,
5269                    u32 *val)
5270{
5271        return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
5272}
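
/* Usage sketch (illustrative only; assumes the FW_PARAMS_* mnemonics from
 * t4fw_api.h and uses "adap->mbox"/"adap->pf" as stand-ins for the caller's
 * mailbox and PF): query the core clock and port vector in one command:
 *
 *     u32 params[2], val[2];
 *
 *     params[0] = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK);
 *     params[1] = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
 *     ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
 */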
5273
5274/**
5275 *      t4_set_params_timeout - sets FW or device parameters
5276 *      @adap: the adapter
5277 *      @mbox: mailbox to use for the FW command
5278 *      @pf: the PF
5279 *      @vf: the VF
5280 *      @nparams: the number of parameters
5281 *      @params: the parameter names
5282 *      @val: the parameter values
5283 *      @timeout: the maximum time to wait for the command, in ms
5284 *
5285 *      Sets the value of FW or device parameters.  Up to 7 parameters can be
5286 *      specified at once.
5287 */
5288int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
5289                          unsigned int pf, unsigned int vf,
5290                          unsigned int nparams, const u32 *params,
5291                          const u32 *val, int timeout)
5292{
5293        struct fw_params_cmd c;
5294        __be32 *p = &c.param[0].mnem;
5295
5296        if (nparams > 7)
5297                return -EINVAL;
5298
5299        memset(&c, 0, sizeof(c));
5300        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
5301                                  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5302                                  FW_PARAMS_CMD_PFN_V(pf) |
5303                                  FW_PARAMS_CMD_VFN_V(vf));
5304        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5305
5306        while (nparams--) {
5307                *p++ = cpu_to_be32(*params++);
5308                *p++ = cpu_to_be32(*val++);
5309        }
5310
5311        return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
5312}
5313
5314/**
5315 *      t4_set_params - sets FW or device parameters
5316 *      @adap: the adapter
5317 *      @mbox: mailbox to use for the FW command
5318 *      @pf: the PF
5319 *      @vf: the VF
5320 *      @nparams: the number of parameters
5321 *      @params: the parameter names
5322 *      @val: the parameter values
5323 *
5324 *      Sets the value of FW or device parameters.  Up to 7 parameters can be
5325 *      specified at once.
5326 */
5327int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
5328                  unsigned int vf, unsigned int nparams, const u32 *params,
5329                  const u32 *val)
5330{
5331        return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
5332                                     FW_CMD_MAX_TIMEOUT);
5333}
5334
5335/**
5336 *      t4_cfg_pfvf - configure PF/VF resource limits
5337 *      @adap: the adapter
5338 *      @mbox: mailbox to use for the FW command
5339 *      @pf: the PF being configured
5340 *      @vf: the VF being configured
5341 *      @txq: the max number of egress queues
5342 *      @txq_eth_ctrl: the max number of egress Ethernet or control queues
5343 *      @rxqi: the max number of interrupt-capable ingress queues
5344 *      @rxq: the max number of interruptless ingress queues
5345 *      @tc: the PCI traffic class
5346 *      @vi: the max number of virtual interfaces
5347 *      @cmask: the channel access rights mask for the PF/VF
5348 *      @pmask: the port access rights mask for the PF/VF
5349 *      @nexact: the maximum number of exact MPS filters
5350 *      @rcaps: read capabilities
5351 *      @wxcaps: write/execute capabilities
5352 *
5353 *      Configures resource limits and capabilities for a physical or virtual
5354 *      function.
5355 */
5356int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
5357                unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
5358                unsigned int rxqi, unsigned int rxq, unsigned int tc,
5359                unsigned int vi, unsigned int cmask, unsigned int pmask,
5360                unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
5361{
5362        struct fw_pfvf_cmd c;
5363
5364        memset(&c, 0, sizeof(c));
5365        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
5366                                  FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
5367                                  FW_PFVF_CMD_VFN_V(vf));
5368        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5369        c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
5370                                     FW_PFVF_CMD_NIQ_V(rxq));
5371        c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
5372                                    FW_PFVF_CMD_PMASK_V(pmask) |
5373                                    FW_PFVF_CMD_NEQ_V(txq));
5374        c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
5375                                      FW_PFVF_CMD_NVI_V(vi) |
5376                                      FW_PFVF_CMD_NEXACTF_V(nexact));
5377        c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
5378                                        FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
5379                                        FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
5380        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5381}
5382
5383/**
5384 *      t4_alloc_vi - allocate a virtual interface
5385 *      @adap: the adapter
5386 *      @mbox: mailbox to use for the FW command
5387 *      @port: physical port associated with the VI
5388 *      @pf: the PF owning the VI
5389 *      @vf: the VF owning the VI
5390 *      @nmac: number of MAC addresses needed (1 to 5)
5391 *      @mac: the MAC addresses of the VI
5392 *      @rss_size: size of RSS table slice associated with this VI
5393 *
5394 *      Allocates a virtual interface for the given physical port.  If @mac is
5395 *      not %NULL it contains the MAC addresses of the VI as assigned by FW.
5396 *      @mac should be large enough to hold @nmac Ethernet addresses; they are
5397 *      stored consecutively, so the space needed is @nmac * 6 bytes.
5398 *      Returns a negative error number or the non-negative VI id.
5399 */
5400int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
5401                unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
5402                unsigned int *rss_size)
5403{
5404        int ret;
5405        struct fw_vi_cmd c;
5406
5407        memset(&c, 0, sizeof(c));
5408        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
5409                                  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
5410                                  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
5411        c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
5412        c.portid_pkd = FW_VI_CMD_PORTID_V(port);
5413        c.nmac = nmac - 1;
5414
5415        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5416        if (ret)
5417                return ret;
5418
5419        if (mac) {
5420                memcpy(mac, c.mac, sizeof(c.mac));
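                /* The cases below fall through deliberately: for nmac > 1
                 * each case copies one additional address and then continues
                 * with the lower-numbered ones.
                 */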
5421                switch (nmac) {
5422                case 5:
5423                        memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
5424                case 4:
5425                        memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
5426                case 3:
5427                        memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
5428                case 2:
5429                        memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
5430                }
5431        }
5432        if (rss_size)
5433                *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
5434        return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
5435}
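/* Usage sketch (editorial illustration): allocating a VI with a single MAC
 * address on a given physical port.  On success the return value is the VI
 * id, mac[] holds the FW-assigned address and rss_size the size of the RSS
 * table slice.
 *
 *	u8 mac[6];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, adap->mbox, port, adap->pf, 0, 1, mac,
 *			   &rss_size);
 *	if (viid < 0)
 *		return viid;
 */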
5436
5437/**
5438 *      t4_free_vi - free a virtual interface
5439 *      @adap: the adapter
5440 *      @mbox: mailbox to use for the FW command
5441 *      @pf: the PF owning the VI
5442 *      @vf: the VF owning the VI
5443 *      @viid: virtual interface identifier
5444 *
5445 *      Free a previously allocated virtual interface.
5446 */
5447int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
5448               unsigned int vf, unsigned int viid)
5449{
5450        struct fw_vi_cmd c;
5451
5452        memset(&c, 0, sizeof(c));
5453        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
5454                                  FW_CMD_REQUEST_F |
5455                                  FW_CMD_EXEC_F |
5456                                  FW_VI_CMD_PFN_V(pf) |
5457                                  FW_VI_CMD_VFN_V(vf));
5458        c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
5459        c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
5460
5461        return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5462}
5463
5464/**
5465 *      t4_set_rxmode - set Rx properties of a virtual interface
5466 *      @adap: the adapter
5467 *      @mbox: mailbox to use for the FW command
5468 *      @viid: the VI id
5469 *      @mtu: the new MTU or -1
5470 *      @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
5471 *      @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
5472 *      @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
5473 *      @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
5474 *      @sleep_ok: if true we may sleep while awaiting command completion
5475 *
5476 *      Sets Rx properties of a virtual interface.
5477 */
5478int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
5479                  int mtu, int promisc, int all_multi, int bcast, int vlanex,
5480                  bool sleep_ok)
5481{
5482        struct fw_vi_rxmode_cmd c;
5483
5484        /* convert to FW values */
5485        if (mtu < 0)
5486                mtu = FW_RXMODE_MTU_NO_CHG;
5487        if (promisc < 0)
5488                promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
5489        if (all_multi < 0)
5490                all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
5491        if (bcast < 0)
5492                bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
5493        if (vlanex < 0)
5494                vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
5495
5496        memset(&c, 0, sizeof(c));
5497        c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
5498                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5499                                   FW_VI_RXMODE_CMD_VIID_V(viid));
5500        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5501        c.mtu_to_vlanexen =
5502                cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
5503                            FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
5504                            FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
5505                            FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
5506                            FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
5507        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5508}
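/* Usage sketch (editorial illustration): turning on promiscuous mode for a
 * VI while leaving the MTU and the remaining Rx properties untouched; the
 * -1 arguments mean "no change".
 *
 *	ret = t4_set_rxmode(adap, adap->mbox, pi->viid, -1, 1, -1, -1, -1,
 *			    true);
 */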
5509
5510/**
5511 *      t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
5512 *      @adap: the adapter
5513 *      @mbox: mailbox to use for the FW command
5514 *      @viid: the VI id
5515 *      @free: if true any existing filters for this VI id are first removed
5516 *      @naddr: the number of MAC addresses to allocate filters for (up to 7)
5517 *      @addr: the MAC address(es)
5518 *      @idx: where to store the index of each allocated filter
5519 *      @hash: pointer to hash address filter bitmap
5520 *      @sleep_ok: call is allowed to sleep
5521 *
5522 *      Allocates an exact-match filter for each of the supplied addresses and
5523 *      sets it to the corresponding address.  If @idx is not %NULL it should
5524 *      have at least @naddr entries, each of which will be set to the index of
5525 *      the filter allocated for the corresponding MAC address.  If a filter
5526 *      could not be allocated for an address, its index is set to 0xffff.
5527 *      If @hash is not %NULL addresses that fail to allocate an exact filter
5528 *      are hashed and update the hash filter bitmap pointed at by @hash.
5529 *
5530 *      Returns a negative error number or the number of filters allocated.
5531 */
5532int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
5533                      unsigned int viid, bool free, unsigned int naddr,
5534                      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
5535{
5536        int offset, ret = 0;
5537        struct fw_vi_mac_cmd c;
5538        unsigned int nfilters = 0;
5539        unsigned int max_naddr = adap->params.arch.mps_tcam_size;
5540        unsigned int rem = naddr;
5541
5542        if (naddr > max_naddr)
5543                return -EINVAL;
5544
5545        for (offset = 0; offset < naddr ; /**/) {
5546                unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
5547                                         rem : ARRAY_SIZE(c.u.exact));
5548                size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5549                                                     u.exact[fw_naddr]), 16);
5550                struct fw_vi_mac_exact *p;
5551                int i;
5552
5553                memset(&c, 0, sizeof(c));
5554                c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5555                                           FW_CMD_REQUEST_F |
5556                                           FW_CMD_WRITE_F |
5557                                           FW_CMD_EXEC_V(free) |
5558                                           FW_VI_MAC_CMD_VIID_V(viid));
5559                c.freemacs_to_len16 =
5560                        cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
5561                                    FW_CMD_LEN16_V(len16));
5562
5563                for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5564                        p->valid_to_idx =
5565                                cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
5566                                            FW_VI_MAC_CMD_IDX_V(
5567                                                    FW_VI_MAC_ADD_MAC));
5568                        memcpy(p->macaddr, addr[offset + i],
5569                               sizeof(p->macaddr));
5570                }
5571
5572                /* It's okay if we run out of space in our MAC address arena.
5573                 * Some of the addresses we submit may get stored so we need
5574                 * to run through the reply to see what the results were ...
5575                 */
5576                ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5577                if (ret && ret != -FW_ENOMEM)
5578                        break;
5579
5580                for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5581                        u16 index = FW_VI_MAC_CMD_IDX_G(
5582                                        be16_to_cpu(p->valid_to_idx));
5583
5584                        if (idx)
5585                                idx[offset + i] = (index >= max_naddr ?
5586                                                   0xffff : index);
5587                        if (index < max_naddr)
5588                                nfilters++;
5589                        else if (hash)
5590                                *hash |= (1ULL <<
5591                                          hash_mac_addr(addr[offset + i]));
5592                }
5593
5594                free = false;
5595                offset += fw_naddr;
5596                rem -= fw_naddr;
5597        }
5598
5599        if (ret == 0 || ret == -FW_ENOMEM)
5600                ret = nfilters;
5601        return ret;
5602}
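/* Usage sketch (editorial illustration): programming exact-match filters for
 * a caller-supplied list of unicast addresses and falling back to the hash
 * filter for any address that did not get a TCAM entry.  "addrs", "naddrs"
 * and "ret" are assumed to come from the caller.
 *
 *	u64 uhash = 0;
 *	int nfilt;
 *
 *	nfilt = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, true, naddrs,
 *				  addrs, NULL, &uhash, true);
 *	if (nfilt < 0)
 *		return nfilt;
 *	if (uhash)
 *		ret = t4_set_addr_hash(adap, adap->mbox, pi->viid, true,
 *				       uhash, true);
 */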
5603
5604/**
5605 *      t4_change_mac - modifies the exact-match filter for a MAC address
5606 *      @adap: the adapter
5607 *      @mbox: mailbox to use for the FW command
5608 *      @viid: the VI id
5609 *      @idx: index of existing filter for old value of MAC address, or -1
5610 *      @addr: the new MAC address value
5611 *      @persist: whether a new MAC allocation should be persistent
5612 *      @add_smt: if true also add the address to the HW SMT
5613 *
5614 *      Modifies an exact-match filter and sets it to the new MAC address.
5615 *      Note that in general it is not possible to modify the value of a given
5616 *      filter, so the generic way to modify an address filter is to free the one
5617 *      being used by the old address value and allocate a new filter for the
5618 *      new address value.  @idx can be -1 if the address is a new addition.
5619 *
5620 *      Returns a negative error number or the index of the filter with the new
5621 *      MAC value.
5622 */
5623int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5624                  int idx, const u8 *addr, bool persist, bool add_smt)
5625{
5626        int ret, mode;
5627        struct fw_vi_mac_cmd c;
5628        struct fw_vi_mac_exact *p = c.u.exact;
5629        unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
5630
5631        if (idx < 0)                             /* new allocation */
5632                idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5633        mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5634
5635        memset(&c, 0, sizeof(c));
5636        c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5637                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5638                                   FW_VI_MAC_CMD_VIID_V(viid));
5639        c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
5640        p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
5641                                      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
5642                                      FW_VI_MAC_CMD_IDX_V(idx));
5643        memcpy(p->macaddr, addr, sizeof(p->macaddr));
5644
5645        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5646        if (ret == 0) {
5647                ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
5648                if (ret >= max_mac_addr)
5649                        ret = -ENOMEM;
5650        }
5651        return ret;
5652}
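/* Usage sketch (editorial illustration): installing a new unicast address
 * for a VI.  filt_idx starts out as -1 so the firmware allocates a filter;
 * the non-negative value returned is remembered and passed back in on the
 * next address change so the same filter gets updated.
 *
 *	filt_idx = t4_change_mac(adap, adap->mbox, pi->viid, filt_idx,
 *				 newaddr, true, true);
 *	if (filt_idx < 0)
 *		return filt_idx;
 */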
5653
5654/**
5655 *      t4_set_addr_hash - program the MAC inexact-match hash filter
5656 *      @adap: the adapter
5657 *      @mbox: mailbox to use for the FW command
5658 *      @viid: the VI id
5659 *      @ucast: whether the hash filter should also match unicast addresses
5660 *      @vec: the value to be written to the hash filter
5661 *      @sleep_ok: call is allowed to sleep
5662 *
5663 *      Sets the 64-bit inexact-match hash filter for a virtual interface.
5664 */
5665int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5666                     bool ucast, u64 vec, bool sleep_ok)
5667{
5668        struct fw_vi_mac_cmd c;
5669
5670        memset(&c, 0, sizeof(c));
5671        c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5672                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5673                                   FW_VI_ENABLE_CMD_VIID_V(viid));
5674        c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
5675                                          FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
5676                                          FW_CMD_LEN16_V(1));
5677        c.u.hash.hashvec = cpu_to_be64(vec);
5678        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5679}
5680
5681/**
5682 *      t4_enable_vi_params - enable/disable a virtual interface
5683 *      @adap: the adapter
5684 *      @mbox: mailbox to use for the FW command
5685 *      @viid: the VI id
5686 *      @rx_en: 1=enable Rx, 0=disable Rx
5687 *      @tx_en: 1=enable Tx, 0=disable Tx
5688 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
5689 *
5690 *      Enables/disables a virtual interface.  Note that setting DCB Enable
5691 *      only makes sense when enabling a Virtual Interface ...
5692 */
5693int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
5694                        unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
5695{
5696        struct fw_vi_enable_cmd c;
5697
5698        memset(&c, 0, sizeof(c));
5699        c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
5700                                   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5701                                   FW_VI_ENABLE_CMD_VIID_V(viid));
5702        c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
5703                                     FW_VI_ENABLE_CMD_EEN_V(tx_en) |
5704                                     FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
5705                                     FW_LEN16(c));
5706        return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
5707}
5708
5709/**
5710 *      t4_enable_vi - enable/disable a virtual interface
5711 *      @adap: the adapter
5712 *      @mbox: mailbox to use for the FW command
5713 *      @viid: the VI id
5714 *      @rx_en: 1=enable Rx, 0=disable Rx
5715 *      @tx_en: 1=enable Tx, 0=disable Tx
5716 *
5717 *      Enables/disables a virtual interface.
5718 */
5719int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5720                 bool rx_en, bool tx_en)
5721{
5722        return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
5723}
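/* Usage sketch (editorial illustration): enabling Rx and Tx on a VI when the
 * corresponding net_device is opened, and disabling both again on close.
 *
 *	ret = t4_enable_vi(adap, adap->mbox, pi->viid, true, true);
 *	...
 *	ret = t4_enable_vi(adap, adap->mbox, pi->viid, false, false);
 */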
5724
5725/**
5726 *      t4_identify_port - identify a VI's port by blinking its LED
5727 *      @adap: the adapter
5728 *      @mbox: mailbox to use for the FW command
5729 *      @viid: the VI id
5730 *      @nblinks: how many times to blink LED at 2.5 Hz
5731 *
5732 *      Identifies a VI's port by blinking its LED.
5733 */
5734int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5735                     unsigned int nblinks)
5736{
5737        struct fw_vi_enable_cmd c;
5738
5739        memset(&c, 0, sizeof(c));
5740        c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
5741                                   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5742                                   FW_VI_ENABLE_CMD_VIID_V(viid));
5743        c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
5744        c.blinkdur = cpu_to_be16(nblinks);
5745        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5746}
5747
5748/**
5749 *      t4_iq_free - free an ingress queue and its FLs
5750 *      @adap: the adapter
5751 *      @mbox: mailbox to use for the FW command
5752 *      @pf: the PF owning the queues
5753 *      @vf: the VF owning the queues
5754 *      @iqtype: the ingress queue type
5755 *      @iqid: ingress queue id
5756 *      @fl0id: FL0 queue id or 0xffff if no attached FL0
5757 *      @fl1id: FL1 queue id or 0xffff if no attached FL1
5758 *
5759 *      Frees an ingress queue and its associated FLs, if any.
5760 */
5761int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5762               unsigned int vf, unsigned int iqtype, unsigned int iqid,
5763               unsigned int fl0id, unsigned int fl1id)
5764{
5765        struct fw_iq_cmd c;
5766
5767        memset(&c, 0, sizeof(c));
5768        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
5769                                  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
5770                                  FW_IQ_CMD_VFN_V(vf));
5771        c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
5772        c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
5773        c.iqid = cpu_to_be16(iqid);
5774        c.fl0id = cpu_to_be16(fl0id);
5775        c.fl1id = cpu_to_be16(fl1id);
5776        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5777}
5778
5779/**
5780 *      t4_eth_eq_free - free an Ethernet egress queue
5781 *      @adap: the adapter
5782 *      @mbox: mailbox to use for the FW command
5783 *      @pf: the PF owning the queue
5784 *      @vf: the VF owning the queue
5785 *      @eqid: egress queue id
5786 *
5787 *      Frees an Ethernet egress queue.
5788 */
5789int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5790                   unsigned int vf, unsigned int eqid)
5791{
5792        struct fw_eq_eth_cmd c;
5793
5794        memset(&c, 0, sizeof(c));
5795        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
5796                                  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5797                                  FW_EQ_ETH_CMD_PFN_V(pf) |
5798                                  FW_EQ_ETH_CMD_VFN_V(vf));
5799        c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
5800        c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
5801        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5802}
5803
5804/**
5805 *      t4_ctrl_eq_free - free a control egress queue
5806 *      @adap: the adapter
5807 *      @mbox: mailbox to use for the FW command
5808 *      @pf: the PF owning the queue
5809 *      @vf: the VF owning the queue
5810 *      @eqid: egress queue id
5811 *
5812 *      Frees a control egress queue.
5813 */
5814int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5815                    unsigned int vf, unsigned int eqid)
5816{
5817        struct fw_eq_ctrl_cmd c;
5818
5819        memset(&c, 0, sizeof(c));
5820        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
5821                                  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5822                                  FW_EQ_CTRL_CMD_PFN_V(pf) |
5823                                  FW_EQ_CTRL_CMD_VFN_V(vf));
5824        c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
5825        c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
5826        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5827}
5828
5829/**
5830 *      t4_ofld_eq_free - free an offload egress queue
5831 *      @adap: the adapter
5832 *      @mbox: mailbox to use for the FW command
5833 *      @pf: the PF owning the queue
5834 *      @vf: the VF owning the queue
5835 *      @eqid: egress queue id
5836 *
5837 *      Frees an offload egress queue.
5838 */
5839int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5840                    unsigned int vf, unsigned int eqid)
5841{
5842        struct fw_eq_ofld_cmd c;
5843
5844        memset(&c, 0, sizeof(c));
5845        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
5846                                  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5847                                  FW_EQ_OFLD_CMD_PFN_V(pf) |
5848                                  FW_EQ_OFLD_CMD_VFN_V(vf));
5849        c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
5850        c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
5851        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5852}
5853
5854/**
5855 *      t4_handle_fw_rpl - process a FW reply message
5856 *      @adap: the adapter
5857 *      @rpl: start of the FW message
5858 *
5859 *      Processes a FW message, such as link state change messages.
5860 */
5861int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5862{
5863        u8 opcode = *(const u8 *)rpl;
5864
5865        if (opcode == FW_PORT_CMD) {    /* link/module state change message */
5866                int speed = 0, fc = 0;
5867                const struct fw_port_cmd *p = (void *)rpl;
5868                int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
5869                int port = adap->chan_map[chan];
5870                struct port_info *pi = adap2pinfo(adap, port);
5871                struct link_config *lc = &pi->link_cfg;
5872                u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
5873                int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
5874                u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
5875
5876                if (stat & FW_PORT_CMD_RXPAUSE_F)
5877                        fc |= PAUSE_RX;
5878                if (stat & FW_PORT_CMD_TXPAUSE_F)
5879                        fc |= PAUSE_TX;
5880                if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
5881                        speed = 100;
5882                else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
5883                        speed = 1000;
5884                else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
5885                        speed = 10000;
5886                else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
5887                        speed = 40000;
5888
5889                if (link_ok != lc->link_ok || speed != lc->speed ||
5890                    fc != lc->fc) {                    /* something changed */
5891                        lc->link_ok = link_ok;
5892                        lc->speed = speed;
5893                        lc->fc = fc;
5894                        lc->supported = be16_to_cpu(p->u.info.pcap);
5895                        t4_os_link_changed(adap, port, link_ok);
5896                }
5897                if (mod != pi->mod_type) {
5898                        pi->mod_type = mod;
5899                        t4_os_portmod_changed(adap, port);
5900                }
5901        }
5902        return 0;
5903}
5904
5905static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
5906{
5907        u16 val;
5908
5909        if (pci_is_pcie(adapter->pdev)) {
5910                pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
5911                p->speed = val & PCI_EXP_LNKSTA_CLS;
5912                p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5913        }
5914}
5915
5916/**
5917 *      init_link_config - initialize a link's SW state
5918 *      @lc: structure holding the link state
5919 *      @caps: link capabilities
5920 *
5921 *      Initializes the SW state maintained for each link, including the link's
5922 *      capabilities and default speed/flow-control/autonegotiation settings.
5923 */
5924static void init_link_config(struct link_config *lc, unsigned int caps)
5925{
5926        lc->supported = caps;
5927        lc->requested_speed = 0;
5928        lc->speed = 0;
5929        lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5930        if (lc->supported & FW_PORT_CAP_ANEG) {
5931                lc->advertising = lc->supported & ADVERT_MASK;
5932                lc->autoneg = AUTONEG_ENABLE;
5933                lc->requested_fc |= PAUSE_AUTONEG;
5934        } else {
5935                lc->advertising = 0;
5936                lc->autoneg = AUTONEG_DISABLE;
5937        }
5938}
5939
5940#define CIM_PF_NOACCESS 0xeeeeeeee
5941
5942int t4_wait_dev_ready(void __iomem *regs)
5943{
5944        u32 whoami;
5945
5946        whoami = readl(regs + PL_WHOAMI_A);
5947        if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
5948                return 0;
5949
5950        msleep(500);
5951        whoami = readl(regs + PL_WHOAMI_A);
5952        return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
5953}
5954
5955struct flash_desc {
5956        u32 vendor_and_model_id;
5957        u32 size_mb;
5958};
5959
5960static int get_flash_params(struct adapter *adap)
5961{
5962        /* Table for non-Numonix supported flash parts.  Numonix parts are left
5963         * to the preexisting code.  All flash parts have 64KB sectors.
5964         */
5965        static struct flash_desc supported_flash[] = {
5966                { 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
5967        };
5968
5969        int ret;
5970        u32 info;
5971
5972        ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
5973        if (!ret)
5974                ret = sf1_read(adap, 3, 0, 1, &info);
5975        t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
5976        if (ret)
5977                return ret;
5978
5979        for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
5980                if (supported_flash[ret].vendor_and_model_id == info) {
5981                        adap->params.sf_size = supported_flash[ret].size_mb;
5982                        adap->params.sf_nsec =
5983                                adap->params.sf_size / SF_SEC_SIZE;
5984                        return 0;
5985                }
5986
5987        if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5988                return -EINVAL;
5989        info >>= 16;                           /* log2 of size */
5990        if (info >= 0x14 && info < 0x18)
5991                adap->params.sf_nsec = 1 << (info - 16);
5992        else if (info == 0x18)
5993                adap->params.sf_nsec = 64;
5994        else
5995                return -EINVAL;
5996        adap->params.sf_size = 1 << info;
5997        adap->params.sf_fw_start =
5998                t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
5999
6000        if (adap->params.sf_size < FLASH_MIN_SIZE)
6001                dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
6002                         adap->params.sf_size, FLASH_MIN_SIZE);
6003        return 0;
6004}
6005
6006static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
6007{
6008        u16 val;
6009        u32 pcie_cap;
6010
6011        pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
6012        if (pcie_cap) {
6013                pci_read_config_word(adapter->pdev,
6014                                     pcie_cap + PCI_EXP_DEVCTL2, &val);
6015                val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
6016                val |= range;
6017                pci_write_config_word(adapter->pdev,
6018                                      pcie_cap + PCI_EXP_DEVCTL2, val);
6019        }
6020}
6021
6022/**
6023 *      t4_prep_adapter - prepare SW and HW for operation
6024 *      @adapter: the adapter
6026 *
6027 *      Initialize adapter SW state for the various HW modules, set initial
6028 *      values for some adapter tunables, take PHYs out of reset, and
6029 *      initialize the MDIO interface.
6030 */
6031int t4_prep_adapter(struct adapter *adapter)
6032{
6033        int ret, ver;
6034        uint16_t device_id;
6035        u32 pl_rev;
6036
6037        get_pci_mode(adapter, &adapter->params.pci);
6038        pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
6039
6040        ret = get_flash_params(adapter);
6041        if (ret < 0) {
6042                dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
6043                return ret;
6044        }
6045
6046        /* Retrieve adapter's device ID
6047         */
6048        pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
6049        ver = device_id >> 12;
6050        adapter->params.chip = 0;
6051        switch (ver) {
6052        case CHELSIO_T4:
6053                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
6054                adapter->params.arch.sge_fl_db = DBPRIO_F;
6055                adapter->params.arch.mps_tcam_size =
6056                                 NUM_MPS_CLS_SRAM_L_INSTANCES;
6057                adapter->params.arch.mps_rplc_size = 128;
6058                adapter->params.arch.nchan = NCHAN;
6059                adapter->params.arch.vfcount = 128;
6060                break;
6061        case CHELSIO_T5:
6062                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
6063                adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
6064                adapter->params.arch.mps_tcam_size =
6065                                 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6066                adapter->params.arch.mps_rplc_size = 128;
6067                adapter->params.arch.nchan = NCHAN;
6068                adapter->params.arch.vfcount = 128;
6069                break;
6070        case CHELSIO_T6:
6071                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
6072                adapter->params.arch.sge_fl_db = 0;
6073                adapter->params.arch.mps_tcam_size =
6074                                 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6075                adapter->params.arch.mps_rplc_size = 256;
6076                adapter->params.arch.nchan = 2;
6077                adapter->params.arch.vfcount = 256;
6078                break;
6079        default:
6080                dev_err(adapter->pdev_dev, "Device %d is not supported\n",
6081                        device_id);
6082                return -EINVAL;
6083        }
6084
6085        adapter->params.cim_la_size = CIMLA_SIZE;
6086        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
6087
6088        /*
6089         * Default port for debugging in case we can't reach FW.
6090         */
6091        adapter->params.nports = 1;
6092        adapter->params.portvec = 1;
6093        adapter->params.vpd.cclk = 50000;
6094
6095        /* Set pci completion timeout value to 4 seconds. */
6096        set_pcie_completion_timeout(adapter, 0xd);
6097        return 0;
6098}
6099
6100/**
6101 *      t4_bar2_sge_qregs - return BAR2 SGE Queue register information
6102 *      @adapter: the adapter
6103 *      @qid: the Queue ID
6104 *      @qtype: the Ingress or Egress type for @qid
6105 *      @user: true if this request is for a user mode queue
6106 *      @pbar2_qoffset: BAR2 Queue Offset
6107 *      @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
6108 *
6109 *      Returns the BAR2 SGE Queue Registers information associated with the
6110 *      indicated Absolute Queue ID.  These are passed back in return value
6111 *      pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues
6112 *      and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
6113 *
6114 *      This may return an error which indicates that BAR2 SGE Queue
6115 *      registers aren't available.  If an error is not returned, then the
6116 *      following values are returned:
6117 *
6118 *        *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
6119 *        *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
6120 *
6121 *      If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
6122 *      require the "Inferred Queue ID" ability may be used.  E.g. the
6123 *      Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
6124 *      then these "Inferred Queue ID" registers may not be used.
6125 */
6126int t4_bar2_sge_qregs(struct adapter *adapter,
6127                      unsigned int qid,
6128                      enum t4_bar2_qtype qtype,
6129                      int user,
6130                      u64 *pbar2_qoffset,
6131                      unsigned int *pbar2_qid)
6132{
6133        unsigned int page_shift, page_size, qpp_shift, qpp_mask;
6134        u64 bar2_page_offset, bar2_qoffset;
6135        unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
6136
6137        /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
6138        if (!user && is_t4(adapter->params.chip))
6139                return -EINVAL;
6140
6141        /* Get our SGE Page Size parameters.
6142         */
6143        page_shift = adapter->params.sge.hps + 10;
6144        page_size = 1 << page_shift;
6145
6146        /* Get the right Queues per Page parameters for our Queue.
6147         */
6148        qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
6149                     ? adapter->params.sge.eq_qpp
6150                     : adapter->params.sge.iq_qpp);
6151        qpp_mask = (1 << qpp_shift) - 1;
6152
6153        /*  Calculate the basics of the BAR2 SGE Queue register area:
6154         *  o The BAR2 page the Queue registers will be in.
6155         *  o The BAR2 Queue ID.
6156         *  o The BAR2 Queue ID Offset into the BAR2 page.
6157         */
6158        bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
6159        bar2_qid = qid & qpp_mask;
6160        bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
6161
6162        /* If the BAR2 Queue ID Offset is less than the Page Size, then the
6163         * hardware will infer the Absolute Queue ID simply from the writes to
6164         * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
6165         * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
6166         * write to the first BAR2 SGE Queue Area within the BAR2 Page with
6167         * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
6168         * from the BAR2 Page and BAR2 Queue ID.
6169         *
6170         * One important consequence of this is that some BAR2 SGE registers
6171         * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
6172         * there.  But other registers synthesize the SGE Queue ID purely
6173         * from the writes to the registers -- the Write Combined Doorbell
6174         * Buffer is a good example.  These BAR2 SGE Registers are only
6175         * available for those BAR2 SGE Register areas where the SGE Absolute
6176         * Queue ID can be inferred from simple writes.
6177         */
6178        bar2_qoffset = bar2_page_offset;
6179        bar2_qinferred = (bar2_qid_offset < page_size);
6180        if (bar2_qinferred) {
6181                bar2_qoffset += bar2_qid_offset;
6182                bar2_qid = 0;
6183        }
6184
6185        *pbar2_qoffset = bar2_qoffset;
6186        *pbar2_qid = bar2_qid;
6187        return 0;
6188}
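/* Usage sketch (editorial illustration): looking up the BAR2 register area
 * of an egress queue.  "bar2" is assumed to be the ioremap'ed BAR2 base;
 * when the returned bar2_qid is 0 the queue's doorbell may be rung through
 * the inferred-queue-ID registers at that offset.
 *
 *	u64 bar2_qoffset;
 *	unsigned int bar2_qid;
 *	void __iomem *db_regs;
 *
 *	if (!t4_bar2_sge_qregs(adap, eq_id, T4_BAR2_QTYPE_EGRESS, 0,
 *			       &bar2_qoffset, &bar2_qid))
 *		db_regs = bar2 + bar2_qoffset;
 */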
6189
6190/**
6191 *      t4_init_devlog_params - initialize adapter->params.devlog
6192 *      @adap: the adapter
6193 *
6194 *      Initialize various fields of the adapter's Firmware Device Log
6195 *      Parameters structure.
6196 */
6197int t4_init_devlog_params(struct adapter *adap)
6198{
6199        struct devlog_params *dparams = &adap->params.devlog;
6200        u32 pf_dparams;
6201        unsigned int devlog_meminfo;
6202        struct fw_devlog_cmd devlog_cmd;
6203        int ret;
6204
6205        /* If we're dealing with newer firmware, the Device Log Parameters
6206         * are stored in a designated register which allows us to access the
6207         * Device Log even if we can't talk to the firmware.
6208         */
6209        pf_dparams =
6210                t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
6211        if (pf_dparams) {
6212                unsigned int nentries, nentries128;
6213
6214                dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
6215                dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
6216
6217                nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
6218                nentries = (nentries128 + 1) * 128;
6219                dparams->size = nentries * sizeof(struct fw_devlog_e);
6220
6221                return 0;
6222        }
6223
6224        /* Otherwise, ask the firmware for its Device Log Parameters.
6225         */
6226        memset(&devlog_cmd, 0, sizeof(devlog_cmd));
6227        devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
6228                                             FW_CMD_REQUEST_F | FW_CMD_READ_F);
6229        devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
6230        ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
6231                         &devlog_cmd);
6232        if (ret)
6233                return ret;
6234
6235        devlog_meminfo =
6236                be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
6237        dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
6238        dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
6239        dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
6240
6241        return 0;
6242}
6243
6244/**
6245 *      t4_init_sge_params - initialize adap->params.sge
6246 *      @adapter: the adapter
6247 *
6248 *      Initialize various fields of the adapter's SGE Parameters structure.
6249 */
6250int t4_init_sge_params(struct adapter *adapter)
6251{
6252        struct sge_params *sge_params = &adapter->params.sge;
6253        u32 hps, qpp;
6254        unsigned int s_hps, s_qpp;
6255
6256        /* Extract the SGE Page Size for our PF.
6257         */
6258        hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
6259        s_hps = (HOSTPAGESIZEPF0_S +
6260                 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
6261        sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
6262
6263        /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
6264         */
6265        s_qpp = (QUEUESPERPAGEPF0_S +
6266                (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
6267        qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
6268        sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
6269        qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
6270        sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
6271
6272        return 0;
6273}
6274
6275/**
6276 *      t4_init_tp_params - initialize adap->params.tp
6277 *      @adap: the adapter
6278 *
6279 *      Initialize various fields of the adapter's TP Parameters structure.
6280 */
6281int t4_init_tp_params(struct adapter *adap)
6282{
6283        int chan;
6284        u32 v;
6285
6286        v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
6287        adap->params.tp.tre = TIMERRESOLUTION_G(v);
6288        adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
6289
6290        /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
6291        for (chan = 0; chan < NCHAN; chan++)
6292                adap->params.tp.tx_modq[chan] = chan;
6293
6294        /* Cache the adapter's Compressed Filter Mode and global Ingress
6295         * Configuration.
6296         */
6297        if (adap->flags & FW_OK) {
6298                t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
6299                                TP_VLAN_PRI_MAP_A, 1);
6300                t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
6301                                TP_INGRESS_CONFIG_A, 1);
6302        } else {
6303                t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
6304                                 &adap->params.tp.vlan_pri_map, 1,
6305                                 TP_VLAN_PRI_MAP_A);
6306                t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
6307                                 &adap->params.tp.ingress_config, 1,
6308                                 TP_INGRESS_CONFIG_A);
6309        }
6310
6311        /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
6312         * shift positions of several elements of the Compressed Filter Tuple
6313         * for this adapter which we need frequently ...
6314         */
6315        adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
6316        adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
6317        adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
6318        adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
6319                                                               PROTOCOL_F);
6320
6321        /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
6322         * represents the presence of an Outer VLAN instead of a VNIC ID.
6323         */
6324        if ((adap->params.tp.ingress_config & VNIC_F) == 0)
6325                adap->params.tp.vnic_shift = -1;
6326
6327        return 0;
6328}
6329
6330/**
6331 *      t4_filter_field_shift - calculate filter field shift
6332 *      @adap: the adapter
6333 *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
6334 *
6335 *      Return the shift position of a filter field within the Compressed
6336 *      Filter Tuple.  The filter field is specified via its selection bit
6337 *      within TP_VLAN_PRI_MAP (filter mode).  E.g. VLAN_F.
6338 */
6339int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
6340{
6341        unsigned int filter_mode = adap->params.tp.vlan_pri_map;
6342        unsigned int sel;
6343        int field_shift;
6344
6345        if ((filter_mode & filter_sel) == 0)
6346                return -1;
6347
6348        for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
6349                switch (filter_mode & sel) {
6350                case FCOE_F:
6351                        field_shift += FT_FCOE_W;
6352                        break;
6353                case PORT_F:
6354                        field_shift += FT_PORT_W;
6355                        break;
6356                case VNIC_ID_F:
6357                        field_shift += FT_VNIC_ID_W;
6358                        break;
6359                case VLAN_F:
6360                        field_shift += FT_VLAN_W;
6361                        break;
6362                case TOS_F:
6363                        field_shift += FT_TOS_W;
6364                        break;
6365                case PROTOCOL_F:
6366                        field_shift += FT_PROTOCOL_W;
6367                        break;
6368                case ETHERTYPE_F:
6369                        field_shift += FT_ETHERTYPE_W;
6370                        break;
6371                case MACMATCH_F:
6372                        field_shift += FT_MACMATCH_W;
6373                        break;
6374                case MPSHITTYPE_F:
6375                        field_shift += FT_MPSHITTYPE_W;
6376                        break;
6377                case FRAGMENTATION_F:
6378                        field_shift += FT_FRAGMENTATION_W;
6379                        break;
6380                }
6381        }
6382        return field_shift;
6383}
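/* Usage sketch (editorial illustration): the shifts cached by
 * t4_init_tp_params() are used to place a field inside a Compressed Filter
 * Tuple.  A shift of -1 means the field is absent from the current filter
 * mode and must be skipped.
 *
 *	if (adap->params.tp.protocol_shift >= 0)
 *		ntuple |= (u64)IPPROTO_TCP << adap->params.tp.protocol_shift;
 */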
6384
6385int t4_init_rss_mode(struct adapter *adap, int mbox)
6386{
6387        int i, ret;
6388        struct fw_rss_vi_config_cmd rvc;
6389
6390        memset(&rvc, 0, sizeof(rvc));
6391
6392        for_each_port(adap, i) {
6393                struct port_info *p = adap2pinfo(adap, i);
6394
6395                rvc.op_to_viid =
6396                        cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
6397                                    FW_CMD_REQUEST_F | FW_CMD_READ_F |
6398                                    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
6399                rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
6400                ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
6401                if (ret)
6402                        return ret;
6403                p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
6404        }
6405        return 0;
6406}
6407
6408int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
6409{
6410        u8 addr[6];
6411        int ret, i, j = 0;
6412        struct fw_port_cmd c;
6413        struct fw_rss_vi_config_cmd rvc;
6414
6415        memset(&c, 0, sizeof(c));
6416        memset(&rvc, 0, sizeof(rvc));
6417
6418        for_each_port(adap, i) {
6419                unsigned int rss_size;
6420                struct port_info *p = adap2pinfo(adap, i);
6421
6422                while ((adap->params.portvec & (1 << j)) == 0)
6423                        j++;
6424
6425                c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
6426                                             FW_CMD_REQUEST_F | FW_CMD_READ_F |
6427                                             FW_PORT_CMD_PORTID_V(j));
6428                c.action_to_len16 = cpu_to_be32(
6429                        FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
6430                        FW_LEN16(c));
6431                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6432                if (ret)
6433                        return ret;
6434
6435                ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
6436                if (ret < 0)
6437                        return ret;
6438
6439                p->viid = ret;
6440                p->tx_chan = j;
6441                p->lport = j;
6442                p->rss_size = rss_size;
6443                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
6444                adap->port[i]->dev_port = j;
6445
6446                ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
6447                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
6448                        FW_PORT_CMD_MDIOADDR_G(ret) : -1;
6449                p->port_type = FW_PORT_CMD_PTYPE_G(ret);
6450                p->mod_type = FW_PORT_MOD_TYPE_NA;
6451
6452                rvc.op_to_viid =
6453                        cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
6454                                    FW_CMD_REQUEST_F | FW_CMD_READ_F |
6455                                    FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
6456                rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
6457                ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
6458                if (ret)
6459                        return ret;
6460                p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
6461
6462                init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
6463                j++;
6464        }
6465        return 0;
6466}
6467
6468/**
6469 *      t4_read_cimq_cfg - read CIM queue configuration
6470 *      @adap: the adapter
6471 *      @base: holds the queue base addresses in bytes
6472 *      @size: holds the queue sizes in bytes
6473 *      @thres: holds the queue full thresholds in bytes
6474 *
6475 *      Returns the current configuration of the CIM queues, starting with
6476 *      the IBQs, then the OBQs.
6477 */
6478void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
6479{
6480        unsigned int i, v;
6481        int cim_num_obq = is_t4(adap->params.chip) ?
6482                                CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
6483
6484        for (i = 0; i < CIM_NUM_IBQ; i++) {
6485                t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
6486                             QUENUMSELECT_V(i));
6487                v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6488                /* value is in 256-byte units */
6489                *base++ = CIMQBASE_G(v) * 256;
6490                *size++ = CIMQSIZE_G(v) * 256;
6491                *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
6492        }
6493        for (i = 0; i < cim_num_obq; i++) {
6494                t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
6495                             QUENUMSELECT_V(i));
6496                v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6497                /* value is in 256-byte units */
6498                *base++ = CIMQBASE_G(v) * 256;
6499                *size++ = CIMQSIZE_G(v) * 256;
6500        }
6501}
6502
6503/**
6504 *      t4_read_cim_ibq - read the contents of a CIM inbound queue
6505 *      @adap: the adapter
6506 *      @qid: the queue index
6507 *      @data: where to store the queue contents
6508 *      @n: capacity of @data in 32-bit words
6509 *
6510 *      Reads the contents of the selected CIM queue starting at address 0 up
6511 *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
6512 *      error and the number of 32-bit words actually read on success.
6513 */
6514int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
6515{
6516        int i, err, attempts;
6517        unsigned int addr;
6518        const unsigned int nwords = CIM_IBQ_SIZE * 4;
6519
6520        if (qid > 5 || (n & 3))
6521                return -EINVAL;
6522
6523        addr = qid * nwords;
6524        if (n > nwords)
6525                n = nwords;
6526
6527        /* It might take 3-10ms before the IBQ debug read access is allowed.
6528         * Wait for up to 1 second with a delay of 1 usec between attempts.
6529         */
6530        attempts = 1000000;
6531
6532        for (i = 0; i < n; i++, addr++) {
6533                t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
6534                             IBQDBGEN_F);
6535                err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
6536                                      attempts, 1);
6537                if (err)
6538                        return err;
6539                *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
6540        }
6541        t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
6542        return i;
6543}
6544
6545/**
6546 *      t4_read_cim_obq - read the contents of a CIM outbound queue
6547 *      @adap: the adapter
6548 *      @qid: the queue index
6549 *      @data: where to store the queue contents
6550 *      @n: capacity of @data in 32-bit words
6551 *
6552 *      Reads the contents of the selected CIM queue starting at address 0 up
6553 *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
6554 *      error and the number of 32-bit words actually read on success.
6555 */
6556int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
6557{
6558        int i, err;
6559        unsigned int addr, v, nwords;
6560        int cim_num_obq = is_t4(adap->params.chip) ?
6561                                CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
6562
6563        if ((qid > (cim_num_obq - 1)) || (n & 3))
6564                return -EINVAL;
6565
6566        t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
6567                     QUENUMSELECT_V(qid));
6568        v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6569
6570        addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
6571        nwords = CIMQSIZE_G(v) * 64;  /* same */
6572        if (n > nwords)
6573                n = nwords;
6574
6575        for (i = 0; i < n; i++, addr++) {
6576                t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
6577                             OBQDBGEN_F);
6578                err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
6579                                      2, 1);
6580                if (err)
6581                        return err;
6582                *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
6583        }
6584        t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
6585        return i;
6586}
6587
6588/**
6589 *      t4_cim_read - read a block from CIM internal address space
6590 *      @adap: the adapter
6591 *      @addr: the start address within the CIM address space
6592 *      @n: number of words to read
6593 *      @valp: where to store the result
6594 *
6595 *      Reads a block of 4-byte words from the CIM internal address space.
6596 */
6597int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
6598                unsigned int *valp)
6599{
6600        int ret = 0;
6601
6602        if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
6603                return -EBUSY;
6604
6605        for ( ; !ret && n--; addr += 4) {
6606                t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
6607                ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
6608                                      0, 5, 2);
6609                if (!ret)
6610                        *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
6611        }
6612        return ret;
6613}
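/* Usage sketch (editorial illustration): pulling a few consecutive 32-bit
 * words out of the CIM address space.  "start_addr" is a hypothetical,
 * 4-byte aligned CIM address supplied by the caller.
 *
 *	unsigned int buf[4];
 *	int ret;
 *
 *	ret = t4_cim_read(adap, start_addr, ARRAY_SIZE(buf), buf);
 *	if (ret)
 *		return ret;
 */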
6614
6615/**
6616 *      t4_cim_write - write a block into CIM internal address space
6617 *      @adap: the adapter
6618 *      @addr: the start address within the CIM address space
6619 *      @n: number of words to write
6620 *      @valp: set of values to write
6621 *
6622 *      Writes a block of 4-byte words into the CIM internal address space.
6623 */
6624int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
6625                 const unsigned int *valp)
6626{
6627        int ret = 0;
6628
6629        if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
6630                return -EBUSY;
6631
6632        for ( ; !ret && n--; addr += 4) {
6633                t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
6634                t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
6635                ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
6636                                      0, 5, 2);
6637        }
6638        return ret;
6639}
6640
6641static int t4_cim_write1(struct adapter *adap, unsigned int addr,
6642                         unsigned int val)
6643{
6644        return t4_cim_write(adap, addr, 1, &val);
6645}
6646
6647/**
6648 *      t4_cim_read_la - read CIM LA capture buffer
6649 *      @adap: the adapter
6650 *      @la_buf: where to store the LA data
6651 *      @wrptr: the HW write pointer within the capture buffer
6652 *
6653 *      Reads the contents of the CIM LA buffer with the most recent entry at
6654 *      the end of the returned data and with the entry at @wrptr first.
6655 *      We try to leave the LA in the running state we find it in.
6656 */
6657int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
6658{
6659        int i, ret;
6660        unsigned int cfg, val, idx;
6661
6662        ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
6663        if (ret)
6664                return ret;
6665
6666        if (cfg & UPDBGLAEN_F) {        /* LA is running, freeze it */
6667                ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
6668                if (ret)
6669                        return ret;
6670        }
6671
6672        ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
6673        if (ret)
6674                goto restart;
6675
6676        idx = UPDBGLAWRPTR_G(val);
6677        if (wrptr)
6678                *wrptr = idx;
6679
6680        for (i = 0; i < adap->params.cim_la_size; i++) {
6681                ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
6682                                    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
6683                if (ret)
6684                        break;
6685                ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
6686                if (ret)
6687                        break;
6688                if (val & UPDBGLARDEN_F) {
6689                        ret = -ETIMEDOUT;
6690                        break;
6691                }
6692                ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
6693                if (ret)
6694                        break;
6695                idx = (idx + 1) & UPDBGLARDPTR_M;
6696        }
6697restart:
6698        if (cfg & UPDBGLAEN_F) {
6699                int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
6700                                      cfg & ~UPDBGLARDEN_F);
6701                if (!ret)
6702                        ret = r;
6703        }
6704        return ret;
6705}
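
/* A minimal usage sketch of t4_cim_read_la(): allocate a buffer sized by
 * adap->params.cim_la_size, snapshot the LA, and report the write pointer
 * together with the oldest and newest captured entries.  The helper name is
 * hypothetical, kcalloc()/kfree() are assumed reachable via the driver's
 * existing includes, and the caller is assumed to be in sleepable context.
 */
static int __maybe_unused t4_cim_la_snapshot(struct adapter *adap)
{
        unsigned int wrptr, size = adap->params.cim_la_size;
        u32 *la_buf;
        int ret;

        la_buf = kcalloc(size, sizeof(u32), GFP_KERNEL);
        if (!la_buf)
                return -ENOMEM;

        ret = t4_cim_read_la(adap, la_buf, &wrptr);
        if (!ret)
                dev_info(adap->pdev_dev,
                         "CIM LA: %u entries, wrptr %u, oldest %#x, newest %#x\n",
                         size, wrptr, la_buf[0], la_buf[size - 1]);

        kfree(la_buf);
        return ret;
}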
6706
6707/**
6708 *      t4_tp_read_la - read TP LA capture buffer
6709 *      @adap: the adapter
6710 *      @la_buf: where to store the LA data
6711 *      @wrptr: the HW write pointer within the capture buffer
6712 *
6713 *      Reads the contents of the TP LA buffer with the most recent entry at
6714 *      the end of the returned data and with the entry at @wrptr first.
6715 *      We leave the LA in the running state we find it in.
6716 */
6717void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
6718{
6719        bool last_incomplete;
6720        unsigned int i, cfg, val, idx;
6721
6722        cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
6723        if (cfg & DBGLAENABLE_F)                        /* freeze LA */
6724                t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
6725                             adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
6726
6727        val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
6728        idx = DBGLAWPTR_G(val);
6729        last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
6730        if (last_incomplete)
6731                idx = (idx + 1) & DBGLARPTR_M;
6732        if (wrptr)
6733                *wrptr = idx;
6734
6735        val &= 0xffff;
6736        val &= ~DBGLARPTR_V(DBGLARPTR_M);
6737        val |= adap->params.tp.la_mask;
6738
6739        for (i = 0; i < TPLA_SIZE; i++) {
6740                t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
6741                la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
6742                idx = (idx + 1) & DBGLARPTR_M;
6743        }
6744
6745        /* Wipe out last entry if it isn't valid */
6746        if (last_incomplete)
6747                la_buf[TPLA_SIZE - 1] = ~0ULL;
6748
6749        if (cfg & DBGLAENABLE_F)                    /* restore running state */
6750                t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
6751                             cfg | adap->params.tp.la_mask);
6752}
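
/* A minimal usage sketch of t4_tp_read_la(): snapshot the TP LA into a
 * TPLA_SIZE-entry buffer and report the write pointer and first entry.  The
 * helper name is hypothetical; kcalloc()/kfree() are assumed available via
 * the driver's existing includes.
 */
static void __maybe_unused t4_tp_la_snapshot(struct adapter *adap)
{
        unsigned int wrptr;
        u64 *la_buf;

        la_buf = kcalloc(TPLA_SIZE, sizeof(u64), GFP_KERNEL);
        if (!la_buf)
                return;

        t4_tp_read_la(adap, la_buf, &wrptr);
        dev_info(adap->pdev_dev, "TP LA: wrptr %u, entry[0] = %#llx\n",
                 wrptr, la_buf[0]);
        kfree(la_buf);
}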
6753
6754/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
6755 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
6756 * state for more than the Warning Threshold then we'll issue a warning about
6757 * a potential hang.  We'll repeat the warning every Warning Repeat seconds
6758 * for as long as the SGE Ingress DMA Channel appears to be hung.
6759 * If the situation clears, we'll note that as well.
6760 */
6761#define SGE_IDMA_WARN_THRESH 1
6762#define SGE_IDMA_WARN_REPEAT 300
6763
6764/**
6765 *      t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
6766 *      @adapter: the adapter
6767 *      @idma: the adapter IDMA Monitor state
6768 *
6769 *      Initialize the state of an SGE Ingress DMA Monitor.
6770 */
6771void t4_idma_monitor_init(struct adapter *adapter,
6772                          struct sge_idma_monitor_state *idma)
6773{
6774        /* Initialize the state variables for detecting an SGE Ingress DMA
6775         * hang.  The SGE has internal counters which count up on each clock
6776         * tick whenever the SGE finds its Ingress DMA State Engines in the
6777         * same state they were on the previous clock tick.  The clock used is
6778         * the Core Clock so we have a limit on the maximum "time" they can
6779         * record; typically a very small number of seconds.  For instance,
6780         * with a 600MHz Core Clock, we can only count up to a bit more than
6781         * 7s.  So we'll synthesize a larger counter in order to not run the
6782         * risk of having the "timers" overflow and give us the flexibility to
6783         * maintain a Hung SGE State Machine of our own which operates across
6784         * a longer time frame.
6785         */
6786        idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
6787        idma->idma_stalled[0] = 0;
6788        idma->idma_stalled[1] = 0;
6789}
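
/* A worked example of the arithmetic above, with illustrative numbers: at a
 * 600 MHz Core Clock, core_ticks_per_usec() is 600, so idma_1s_thresh ends
 * up as 600 * 1000000 = 600,000,000 ticks (exactly one second).  A 32-bit
 * hardware counter at that rate saturates around 2^32 / 6e8 ~= 7.16
 * seconds, which is why idma_stalled[] (kept in units of kernel ticks)
 * extends the measurement over a longer time frame.
 */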
6790
6791/**
6792 *      t4_idma_monitor - monitor SGE Ingress DMA state
6793 *      @adapter: the adapter
6794 *      @idma: the adapter IDMA Monitor state
6795 *      @hz: number of ticks/second
6796 *      @ticks: number of ticks since the last IDMA Monitor call
6797 */
6798void t4_idma_monitor(struct adapter *adapter,
6799                     struct sge_idma_monitor_state *idma,
6800                     int hz, int ticks)
6801{
6802        int i, idma_same_state_cnt[2];
6803
6804        /* Read the SGE Debug Ingress DMA Same State Count registers.  These
6805         * are counters inside the SGE which count up on each clock when the
6806         * SGE finds its Ingress DMA State Engines in the same states they
6807         * were in the previous clock.  The counters will peg out at
6808         * 0xffffffff without wrapping around, so once they pass the 1s
6809         * threshold they'll stay above it until the IDMA state changes.
6810         */
6811        t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
6812        idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
6813        idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6814
6815        for (i = 0; i < 2; i++) {
6816                u32 debug0, debug11;
6817
6818                /* If the Ingress DMA Same State Counter ("timer") is less
6819                 * than 1s, then we can reset our synthesized Stall Timer and
6820                 * continue.  If we have previously emitted warnings about a
6821                 * potential stalled Ingress Queue, issue a note indicating
6822                 * that the Ingress Queue has resumed forward progress.
6823                 */
6824                if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
6825                        if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
6826                                dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
6827                                         "resumed after %d seconds\n",
6828                                         i, idma->idma_qid[i],
6829                                         idma->idma_stalled[i] / hz);
6830                        idma->idma_stalled[i] = 0;
6831                        continue;
6832                }
6833
6834                /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
6835                 * domain.  The first time we get here it'll be because we
6836                 * passed the 1s Threshold; each additional time it'll be
6837                 * because the RX Timer Callback is being fired on its regular
6838                 * schedule.
6839                 *
6840                 * If the stall is below our Potential Hung Ingress Queue
6841                 * Warning Threshold, continue.
6842                 */
6843                if (idma->idma_stalled[i] == 0) {
6844                        idma->idma_stalled[i] = hz;
6845                        idma->idma_warn[i] = 0;
6846                } else {
6847                        idma->idma_stalled[i] += ticks;
6848                        idma->idma_warn[i] -= ticks;
6849                }
6850
6851                if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
6852                        continue;
6853
6854                /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
6855                 */
6856                if (idma->idma_warn[i] > 0)
6857                        continue;
6858                idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
6859
6860                /* Read and save the SGE IDMA State and Queue ID information.
6861                 * We do this every time in case it changes across time ...
6862                 * can't be too careful ...
6863                 */
6864                t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
6865                debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6866                idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
6867
6868                t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
6869                debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6870                idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
6871
6872                dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
6873                         "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
6874                         i, idma->idma_qid[i], idma->idma_state[i],
6875                         idma->idma_stalled[i] / hz,
6876                         debug0, debug11);
6877                t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
6878        }
6879}
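
/* A minimal usage sketch of the IDMA monitor: initialize the state once
 * during adapter setup and then call t4_idma_monitor() periodically, e.g.
 * from a timer callback, passing HZ and the number of kernel ticks since
 * the previous call.  The function and parameter names below (other than
 * the two t4_idma_* calls and HZ) are hypothetical.
 */
static void __maybe_unused example_idma_poll(struct adapter *adap,
                                             struct sge_idma_monitor_state *idma,
                                             bool first_call, int ticks_since_last)
{
        if (first_call)
                t4_idma_monitor_init(adap, idma);

        /* The monitor's thresholds are in seconds; HZ converts them to
         * kernel ticks so they can be compared against idma_stalled[].
         */
        t4_idma_monitor(adap, idma, HZ, ticks_since_last);
}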
6880