linux/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"

int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);
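	/* SLI_SCRATCH1 is seeded with a nonzero value here; the check after
	 * the reset below assumes a successful soft reset clears it.
	 */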

	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	/* make sure that the reset is written before starting timer */
	mmiowb();

	/* Wait for 100ms as Octeon resets. */
	mdelay(100);

	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}

void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
	u32 val;

	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
	if (val & 0x000c0000) {
		dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
			val & 0x000c0000);
	}

	val |= 0xf;          /* Enable Link error reporting */

	dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting...\n");
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}

void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
			       enum octeon_pcie_mps mps)
{
	u32 val;
	u64 r64;

	/* Read config register for MPS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
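	/* In the PCIe Device Control register, bits 7:5 encode the Max
	 * Payload Size (000b = 128B, 001b = 256B, 010b = 512B, ...).
	 */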

	if (mps == PCIE_MPS_DEFAULT) {
		mps = ((val & (0x7 << 5)) >> 5);
	} else {
		val &= ~(0x7 << 5);  /* Turn off any MPS bits */
		val |= (mps << 5);   /* Set MPS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= (mps << 4);
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
				enum octeon_pcie_mrrs mrrs)
{
	u32 val;
	u64 r64;

	/* Read config register for MRRS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
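	/* Bits 14:12 of the PCIe Device Control register encode the Max
	 * Read Request Size, using the same encoding as MPS (010b = 512B).
	 */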

	if (mrrs == PCIE_MRRS_DEFAULT) {
		mrrs = ((val & (0x7 << 12)) >> 12);
	} else {
		val &= ~(0x7 << 12); /* Turn off any MRRS bits */
		val |= (mrrs << 12); /* Set MRRS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
	r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
	r64 |= mrrs;
	octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

	/* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= mrrs;
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
	 * for SLI.
	 */
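	/* The reference clock is 50 MHz, so e.g. a (hypothetical)
	 * multiplier of 12 gives a 600 MHz coprocessor clock.
	 */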
	return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}

u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
			    u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

	/* Core clock per us / oq ticks would be fractional. To avoid that,
	 * we use the method below.
	 */

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;
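
	/* Worked example (hypothetical 600 MHz SLI clock, 100 us interval):
	 * 600 * 1000 / 1024 = 585 ticks/ms; 585 * 100 / 1000 = 58 ticks.
	 */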

	return oqticks_per_us;
}

void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
	/* Select Round-Robin Arb, ES, RO, NS for Input Queues */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
			 CN6XXX_INPUT_CTL_MASK);

	/* Instruction Read Size - Max 4 instructions per PCIE Read */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
			   0xFFFFFFFFFFFFFFFFULL);

	/* Select PCIE Port for all Input rings. */
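	/* Multiplying the 2-bit port number by 0x5555555555555555 replicates
	 * it into each ring's 2-bit field of this register.
	 */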
	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
			   (oct->pcie_port * 0x5555555555555555ULL));
}

static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	u64 pktctl;

	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	/* 66XX SPECIFIC */
	if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
		/* Disable RING_EN if only up to 4 rings are used. */
		pktctl &= ~(1 << 4);
	else
		pktctl |= (1 << 4);

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
	u32 time_threshold;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Select PCI-E Port for all Output queues */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
			   (oct->pcie_port * 0x5555555555555555ULL));

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
	} else {
		/* Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
	}

	/* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

	/* Select ES, RO, NS setting from register for Output Queue Packet
	 * Address
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output
	 * Queue ScatterList
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

	/* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
			   0x5555555555555555ULL);
#else
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
			   0x5555555555555555ULL);

	/* Set up interrupt packet and time threshold */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
	time_threshold =
		lio_cn6xxx_get_oq_ticks(oct, (u32)
					CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}

static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn66xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid a host
	 * hang when an invalid register is read.
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
	return 0;
}

void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this
	 * queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr
			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter
	 * (used in flush_iq calculation)
	 */
	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}

static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	lio_cn6xxx_setup_iq_regs(oct, iq_no);

	/* Backpressure for this queue - WMARK set to all F's. This effectively
	 * disables the backpressure mechanism.
	 */
	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
			   (0xFFFFFFFFULL << 32));
}

void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 intr;
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 droq->buffer_size);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

	/* Enable this output queue to generate Packet Timer Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

	/* Enable this output queue to generate Packet Count Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}

int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
	u32 mask;

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
	mask |= oct->io_qmask.iq64B;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask |= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask |= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	return 0;
}

void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
	int i;
	u32 mask, loop = HZ;
	u32 d32;

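	/* Each polling loop below runs at most HZ iterations, sleeping one
	 * jiffy per iteration, so it is bounded at roughly one second.
	 */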
	/* Reset the Enable bits for Input Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask ^= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	/* Wait until hardware indicates that the queues are in reset. */
	mask = (u32)oct->io_qmask.iq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Input queue. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
	}

	/* Reset the Enable bits for Output Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask ^= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	/* Wait until hardware indicates that the queues are in reset. */
	loop = HZ;
	mask = (u32)oct->io_qmask.oq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Output queue. */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
	}

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}

void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
			  u64 core_addr,
			  u32 idx,
			  int valid)
{
	u64 bar1;

	if (valid == 0) {
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		return;
	}

	/* Bits 17:4 of the PCI_BAR1_INDEXx store bits 35:22 of
	 * the Core Addr
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN6XXX_BAR1_REG(idx, oct->pcie_port));

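	/* Read the index register back; the assumption here is that the read
	 * flushes the posted write before the new mapping is used.
	 */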
	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
			       u32 idx,
			       u32 mask)
{
	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx = readl(iq->inst_cnt_reg);

	/* The instruction count register is a 32-bit counter that can roll
	 * over; its value at init time was saved in reset_instr_cnt.
	 */
	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	else
		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
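
	/* Example: if reset_instr_cnt is 0xfffffff0 and the counter now
	 * reads 0x10, it has wrapped; 0x10 + (0xffffffff - 0xfffffff0) + 1
	 * = 0x20 instructions have been fetched since init.
	 */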

	/* Modulo of the new index with the IQ size will give us
	 * the new index.
	 */
	new_idx %= iq->max_count;

	return new_idx;
}

void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
				 u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

	/* Enable Interrupt */
	writeq(mask, cn6xxx->intr_enb_reg64);
}

void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
				  u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Disable Interrupts */
	writeq(0, cn6xxx->intr_enb_reg64);

	/* make sure interrupts are really disabled */
	mmiowb();
}

static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
	 * to determine the PCIE port #
	 */
	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}

static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
		CVM_CAST64(intr64));
}

static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
	struct octeon_droq *droq;
	int oq_no;
	u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
	u32 droq_cnt_enb, droq_cnt_mask;

	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	droq_mask = droq_cnt_mask & droq_cnt_enb;

	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	droq_mask |= (droq_time_mask & droq_int_enb);

	droq_mask &= oct->io_qmask.oq;

	oct->droq_intr = 0;

	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
		if (!(droq_mask & BIT_ULL(oq_no)))
			continue;

		droq = oct->droq[oq_no];
		pkt_count = octeon_droq_check_hw_for_pkts(droq);
		if (pkt_count) {
			oct->droq_intr |= BIT_ULL(oq_no);
			if (droq->ops.poll_mode) {
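				/* In poll mode, mask this queue's count and
				 * time interrupts here; the polling path is
				 * then expected to re-enable them (assumption
				 * based on the poll_mode flag).
				 */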
				u32 value;
				u32 reg;

				struct octeon_cn6xxx *cn6xxx =
					(struct octeon_cn6xxx *)oct->chip;

				/* disable interrupts for this droq */
				spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);

				/* Ensure that the enable register is written.
				 */
				mmiowb();

				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
			}
		}
	}

	droq_time_mask &= oct->io_qmask.oq;
	droq_cnt_mask &= oct->io_qmask.oq;

	/* Reset the PKT_CNT/TIME_INT registers. */
	if (droq_time_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

	if (droq_cnt_mask)	/* reset PKT_CNT register (66xx) */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

	return 0;
}

irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 intr64;

	intr64 = readq(cn6xxx->intr_sum_reg64);

	/* If our device has interrupted, then proceed. Also check for all
	 * F's, which the read returns if an error interrupt fired and the
	 * PCI read itself failed.
	 */
	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
		return IRQ_NONE;

	oct->int_status = 0;

	if (intr64 & CN6XXX_INTR_ERR)
		lio_cn6xxx_process_pcie_error_intr(oct, intr64);

	if (intr64 & CN6XXX_INTR_PKT_DATA) {
		lio_cn6xxx_process_droq_intr_regs(oct);
		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	writeq(intr64, cn6xxx->intr_sum_reg64);

	return IRQ_HANDLED;
}

void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
				  void *chip,
				  struct octeon_reg_list *reg_list)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

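	/* These window address/data registers provide indirect access to
	 * Octeon-internal CSRs through BAR0; lio_pci_readq()/lio_pci_writeq()
	 * are built on top of them.
	 */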
	reg_list->pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
	reg_list->pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
	reg_list->pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

	reg_list->pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
	reg_list->pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
	reg_list->pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

	reg_list->pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
	reg_list->pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
	reg_list->pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

	reg_list->pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
	reg_list->pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
	reg_list->pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

	lio_cn6xxx_get_pcie_qlmport(oct);

	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
	cn6xxx->intr_enb_reg64 =
		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}

int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

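	/* Wire up the chip-specific function table. CN66XX uses its own IQ
	 * setup (which also disables IQ backpressure); the remaining ops are
	 * shared with the common CN6XXX code.
	 */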
	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	cn6xxx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, LIO_210SV);
	if (!cn6xxx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	return 0;
}

int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
				    struct octeon_config *conf6xxx)
{
	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_INPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_OQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_OUTPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
	    CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
			__func__);
		return 1;
	}
	if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
		dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
			__func__);
		return 1;
	}

	return 0;
}