/* linux/drivers/net/can/kvaser_pciefd.c */
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
   2/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
   3 * Parts of this driver are based on the following:
   4 *  - Kvaser linux pciefd driver (version 5.25)
   5 *  - PEAK linux canfd driver
   6 *  - Altera Avalon EPCS flash controller driver
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/module.h>
  11#include <linux/device.h>
  12#include <linux/pci.h>
  13#include <linux/can/dev.h>
  14#include <linux/timer.h>
  15#include <linux/netdevice.h>
  16#include <linux/crc32.h>
  17#include <linux/iopoll.h>
  18
  19MODULE_LICENSE("Dual BSD/GPL");
  20MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
  21MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
  22
  23#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
  24
  25#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
  26#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
  27#define KVASER_PCIEFD_MAX_ERR_REP 256
  28#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
  29#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
  30#define KVASER_PCIEFD_DMA_COUNT 2
  31
  32#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
  33#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
  34
  35#define KVASER_PCIEFD_VENDOR 0x1a07
  36#define KVASER_PCIEFD_4HS_ID 0x0d
  37#define KVASER_PCIEFD_2HS_ID 0x0e
  38#define KVASER_PCIEFD_HS_ID 0x0f
  39#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
  40#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
  41
  42/* PCIe IRQ registers */
  43#define KVASER_PCIEFD_IRQ_REG 0x40
  44#define KVASER_PCIEFD_IEN_REG 0x50
  45/* DMA map */
  46#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
  47/* Kvaser KCAN CAN controller registers */
  48#define KVASER_PCIEFD_KCAN0_BASE 0x10000
  49#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
  50#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
  51#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
  52#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
  53#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
  54#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
  55#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
  56#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
  57#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
  58#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
  59#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
  60#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
  61#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
  62/* Loopback control register */
  63#define KVASER_PCIEFD_LOOP_REG 0x1f000
  64/* System identification and information registers */
  65#define KVASER_PCIEFD_SYSID_BASE 0x1f020
  66#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
  67#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
  68#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
  69#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
  70/* Shared receive buffer registers */
  71#define KVASER_PCIEFD_SRB_BASE 0x1f200
  72#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
  73#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
  74#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
  75#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
  76#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
  77/* EPCS flash controller registers */
  78#define KVASER_PCIEFD_SPI_BASE 0x1fc00
  79#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
  80#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
  81#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
  82#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
  83#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
  84
  85#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
  86#define KVASER_PCIEFD_IRQ_SRB BIT(4)
  87
  88#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
  89#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
  90#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
  91
  92/* Reset DMA buffer 0, 1 and FIFO offset */
  93#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
  94#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
  95#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
  96
  97/* DMA packet done, buffer 0 and 1 */
  98#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
  99#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
 100/* DMA overflow, buffer 0 and 1 */
 101#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
 102#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
 103/* DMA underflow, buffer 0 and 1 */
 104#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
 105#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
 106
 107/* DMA idle */
 108#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
 109/* DMA support */
 110#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
 111
 112/* DMA Enable */
 113#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
 114
 115/* EPCS flash controller definitions */
 116#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
 117#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
 118#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
 119#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
 120#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
 121#define KVASER_PCIEFD_CFG_SYS_VER 1
 122#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
 123#define KVASER_PCIEFD_SPI_TMT BIT(5)
 124#define KVASER_PCIEFD_SPI_TRDY BIT(6)
 125#define KVASER_PCIEFD_SPI_RRDY BIT(7)
 126#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
 127/* Commands for controlling the onboard flash */
 128#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
 129#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
 130#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
 131
 132/* Kvaser KCAN definitions */
 133#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
 134#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
 135
 136#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
 137/* Request status packet */
 138#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
 139/* Abort, flush and reset */
 140#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
 141
 142/* Tx FIFO unaligned read */
 143#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
 144/* Tx FIFO unaligned end */
 145#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
 146/* Bus parameter protection error */
 147#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
 148/* FDF bit when controller is in classic mode */
 149#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
 150/* Rx FIFO overflow */
 151#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
 152/* Abort done */
 153#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
 154/* Tx buffer flush done */
 155#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
 156/* Tx FIFO overflow */
 157#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
 158/* Tx FIFO empty */
 159#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
 160/* Transmitter unaligned */
 161#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
 162
 163#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
 164
 165#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
 166/* Abort request */
 167#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
 168/* Idle state. Controller in reset mode and no abort or flush pending */
 169#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
 170/* Bus off */
 171#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
 172/* Reset mode request */
 173#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
 174/* Controller in reset mode */
 175#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
 176/* Controller got one-shot capability */
 177#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
 178/* Controller got CAN FD capability */
 179#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
 180#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
 181        KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
 182        KVASER_PCIEFD_KCAN_STAT_IRM)
 183
 184/* Reset mode */
 185#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
 186/* Listen only mode */
 187#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
 188/* Error packet enable */
 189#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
 190/* CAN FD non-ISO */
 191#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
 192/* Acknowledgment packet type */
 193#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
 194/* Active error flag enable. Clear to force error passive */
 195#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
 196/* Classic CAN mode */
 197#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
 198
 199#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
 200#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
 201#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
 202
 203#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
 204
 205/* Kvaser KCAN packet types */
 206#define KVASER_PCIEFD_PACK_TYPE_DATA 0
 207#define KVASER_PCIEFD_PACK_TYPE_ACK 1
 208#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
 209#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
 210#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
 211#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
 212#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
 213#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
 214#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
 215
 216/* Kvaser KCAN packet common definitions */
 217#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
 218#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
 219#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
 220
 221/* Kvaser KCAN TDATA and RDATA first word */
 222#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
 223#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
 224/* Kvaser KCAN TDATA and RDATA second word */
 225#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
 226#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
 227#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
 228#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
 229/* Kvaser KCAN TDATA second word */
 230#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
 231#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
 232
 233/* Kvaser KCAN APACKET */
 234#define KVASER_PCIEFD_APACKET_FLU BIT(8)
 235#define KVASER_PCIEFD_APACKET_CT BIT(9)
 236#define KVASER_PCIEFD_APACKET_ABL BIT(10)
 237#define KVASER_PCIEFD_APACKET_NACK BIT(11)
 238
 239/* Kvaser KCAN SPACK first word */
 240#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
 241#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
 242#define KVASER_PCIEFD_SPACK_IDET BIT(20)
 243#define KVASER_PCIEFD_SPACK_IRM BIT(21)
 244#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
 245/* Kvaser KCAN SPACK second word */
 246#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
 247#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
 248#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
 249
 250struct kvaser_pciefd;
 251
/* Per-channel state for one KCAN CAN controller. */
struct kvaser_pciefd_can {
	struct can_priv can;	/* must be first: the CAN core embeds it at netdev_priv() */
	struct kvaser_pciefd *kv_pcie;	/* parent board */
	void __iomem *reg_base;	/* this channel's KCAN register window */
	struct can_berr_counter bec;	/* cached rx/tx error counters */
	u8 cmd_seq;	/* sequence number tagged onto CMD register writes */
	int err_rep_cnt;	/* error reports seen; reset on bus-on */
	int echo_idx;	/* next free echo-skb slot, doubles as HW seq number */
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;	/* periodic error-counter poll (stopped on bus-on) */
	struct completion start_comp, flush_comp;	/* controller started / Tx flush done */
};
 265
/* Device-wide state for one Kvaser PCIe board. */
struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;	/* whole-device register mapping */
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];	/* shared receive DMA buffers */
	u8 nr_channels;	/* populated from the flash config image */
	u32 bus_freq;	/* system bus clock, Hz (read from SYSID regs) */
	u32 freq;	/* CAN controller clock, Hz */
	u32 freq_to_ticks_div;	/* divider for timestamp conversion */
};
 276
/* One received KCAN packet: two header words plus a 64-bit timestamp. */
struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};
 281
/* One outgoing KCAN packet: two header words plus up to 64 data bytes
 * (max CAN FD payload).
 */
struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};
 286
/* Bit-timing limits reported to the CAN core; field widths match the
 * BTRN/BTRD register layout (see kvaser_pciefd_set_bittiming()).
 */
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 255,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 4096,
	.brp_inc = 1,
};
 298
/* One parameter record in the flash configuration image.
 * All multi-byte fields are little-endian as stored in flash.
 */
struct kvaser_pciefd_cfg_param {
	__le32 magic;	/* NOTE(review): presumably KVASER_PCIEFD_CFG_MAGIC; not checked in this chunk */
	__le32 nr;	/* parameter number */
	__le32 len;	/* number of valid bytes in data[] */
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};
 305
/* Layout of the configuration image stored at KVASER_PCIEFD_CFG_IMG_OFFSET
 * in the EPCS flash. The crc field covers params[] only.
 */
struct kvaser_pciefd_cfg_img {
	__le32 version;	/* expected KVASER_PCIEFD_CFG_SYS_VER */
	__le32 magic;	/* expected KVASER_PCIEFD_CFG_MAGIC */
	__le32 crc;	/* inverted big-endian CRC-32 of params[] */
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};
 312
 313static struct pci_device_id kvaser_pciefd_id_table[] = {
 314        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
 315        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
 316        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
 317        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
 318        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
 319        { 0,},
 320};
 321MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
 322
 323/* Onboard flash memory functions */
 324static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
 325{
 326        u32 res;
 327        int ret;
 328
 329        ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
 330                                 res, res & msk, 0, 10);
 331
 332        return ret;
 333}
 334
/* Run one transaction on the EPCS flash SPI controller: shift out
 * @tx_len command/address bytes from @tx, then clock @rx_len response
 * bytes into @rx by transmitting dummy zeroes.
 *
 * Returns 0 on success, -EIO if the controller never reports
 * TRDY/RRDY/TMT within the poll timeout.
 */
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	/* Select slave 0 and enable the SPI core (BIT(10) in CTRL —
	 * presumably the slave-select override of the Avalon SPI core)
	 */
	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	/* Discard any stale byte left in the receive register */
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		/* Wait until the transmit register can accept a byte */
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		/* Every byte shifted out also shifts one in; drain it */
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c-- > 0) {
		/* Send a dummy byte to clock the response out of the flash */
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	/* Let the shift register drain completely before deselecting */
	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	/* c is exactly -1 only when the rx loop consumed every requested
	 * byte; any other value indicates a bogus rx_len
	 */
	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}
 382
/* Read the configuration image from flash into @img and validate its
 * version, magic number and CRC.
 *
 * Returns 0 on success, -ENODEV for bad version/magic, -EIO on SPI
 * failure or CRC mismatch.
 */
static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_cfg_img *img)
{
	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
	int res, crc;
	u8 *crc_buff;

	/* READ command followed by a 24-bit flash address, MSB first */
	u8 cmd[] = {
		KVASER_PCIEFD_FLASH_READ_CMD,
		(u8)((offset >> 16) & 0xff),
		(u8)((offset >> 8) & 0xff),
		(u8)(offset & 0xff)
	};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
				    KVASER_PCIEFD_CFG_IMG_SZ);
	if (res)
		return res;

	/* The stored CRC covers only the parameter area, not the header */
	crc_buff = (u8 *)img->params;

	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, version number is wrong\n");
		return -ENODEV;
	}

	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, magic number is wrong\n");
		return -ENODEV;
	}

	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
	if (le32_to_cpu(img->crc) != crc) {
		dev_err(&pcie->pci->dev,
			"Stored CRC does not match flash image contents\n");
		return -EIO;
	}

	return 0;
}
 425
 426static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
 427                                          struct kvaser_pciefd_cfg_img *img)
 428{
 429        struct kvaser_pciefd_cfg_param *param;
 430
 431        param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
 432        memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
 433}
 434
 435static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
 436{
 437        int res;
 438        struct kvaser_pciefd_cfg_img *img;
 439
 440        /* Read electronic signature */
 441        u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
 442
 443        res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
 444        if (res)
 445                return -EIO;
 446
 447        img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
 448        if (!img)
 449                return -ENOMEM;
 450
 451        if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
 452                dev_err(&pcie->pci->dev,
 453                        "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
 454                        cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
 455
 456                res = -ENODEV;
 457                goto image_free;
 458        }
 459
 460        cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
 461        res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
 462        if (res) {
 463                goto image_free;
 464        } else if (cmd[0] & 1) {
 465                res = -EIO;
 466                /* No write is ever done, the WIP should never be set */
 467                dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
 468                goto image_free;
 469        }
 470
 471        res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
 472        if (res) {
 473                res = -EIO;
 474                goto image_free;
 475        }
 476
 477        kvaser_pciefd_cfg_read_params(pcie, img);
 478
 479image_free:
 480        kfree(img);
 481        return res;
 482}
 483
 484static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
 485{
 486        u32 cmd;
 487
 488        cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
 489        cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
 490        iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
 491}
 492
 493static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
 494{
 495        u32 mode;
 496        unsigned long irq;
 497
 498        spin_lock_irqsave(&can->lock, irq);
 499        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 500        if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
 501                mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
 502                iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 503        }
 504        spin_unlock_irqrestore(&can->lock, irq);
 505}
 506
 507static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
 508{
 509        u32 mode;
 510        unsigned long irq;
 511
 512        spin_lock_irqsave(&can->lock, irq);
 513        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 514        mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
 515        iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 516        spin_unlock_irqrestore(&can->lock, irq);
 517}
 518
 519static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
 520{
 521        u32 msk;
 522
 523        msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
 524              KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
 525              KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
 526              KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
 527              KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
 528
 529        iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 530
 531        return 0;
 532}
 533
 534static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
 535{
 536        u32 mode;
 537        unsigned long irq;
 538
 539        spin_lock_irqsave(&can->lock, irq);
 540
 541        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 542        if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
 543                mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
 544                if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
 545                        mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
 546                else
 547                        mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
 548        } else {
 549                mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
 550                mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
 551        }
 552
 553        if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 554                mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
 555
 556        mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
 557        mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
 558        /* Use ACK packet type */
 559        mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
 560        mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
 561        iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 562
 563        spin_unlock_irqrestore(&can->lock, irq);
 564}
 565
/* Kick off an abort/flush/reset of the controller's Tx path.
 * flush_comp is completed asynchronously (by the IRQ handler, which is
 * outside this chunk) when the flush has finished.
 */
static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	/* Ack everything pending, then enable only abort/flush done */
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* If controller is already idle, run abort, flush and reset */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	/* else: a reset-mode request is already pending; nothing to do */

	spin_unlock_irqrestore(&can->lock, irq);
}
 595
/* Bring the controller onto the bus: flush any pending Tx traffic,
 * release reset mode, wait for the start indication, then re-arm
 * interrupts and the mode settings.
 *
 * Returns 0 on success, -ETIMEDOUT if flush or start never completes.
 */
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	/* Stop the error-counter poll timer while the bus is (re)started */
	del_timer(&can->bec_poll_timer);

	/* Don't restart an abort/flush that is already in progress */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	/* Mask, then ack, all channel interrupts... */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	/* ...keeping only abort-done and flush-done enabled */
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	/* Release the controller from reset mode */
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	/* start_comp is completed by the IRQ handler (not in this chunk) */
	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}
 644
 645static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
 646{
 647        u8 top;
 648        u32 pwm_ctrl;
 649        unsigned long irq;
 650
 651        spin_lock_irqsave(&can->lock, irq);
 652        pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
 653        top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;
 654
 655        /* Set duty cycle to zero */
 656        pwm_ctrl |= top;
 657        iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
 658        spin_unlock_irqrestore(&can->lock, irq);
 659}
 660
/* Start the PWM output at 500 kHz with ~95% duty cycle, derived from
 * the system bus clock. (What the PWM drives is not visible in this
 * chunk — presumably transceiver power; confirm against the hardware
 * documentation.)
 */
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	/* Make sure the output is idle (duty 0) before reprogramming */
	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Set frequency to 500 KHz*/
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	/* Program top with trigger == top first, keeping duty at 0 */
	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95 */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
 684
 685static int kvaser_pciefd_open(struct net_device *netdev)
 686{
 687        int err;
 688        struct kvaser_pciefd_can *can = netdev_priv(netdev);
 689
 690        err = open_candev(netdev);
 691        if (err)
 692                return err;
 693
 694        err = kvaser_pciefd_bus_on(can);
 695        if (err)
 696                return err;
 697
 698        return 0;
 699}
 700
 701static int kvaser_pciefd_stop(struct net_device *netdev)
 702{
 703        struct kvaser_pciefd_can *can = netdev_priv(netdev);
 704        int ret = 0;
 705
 706        /* Don't interrupt ongoing flush */
 707        if (!completion_done(&can->flush_comp))
 708                kvaser_pciefd_start_controller_flush(can);
 709
 710        if (!wait_for_completion_timeout(&can->flush_comp,
 711                                         KVASER_PCIEFD_WAIT_TIMEOUT)) {
 712                netdev_err(can->can.dev, "Timeout during stop\n");
 713                ret = -ETIMEDOUT;
 714        } else {
 715                iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 716                del_timer(&can->bec_poll_timer);
 717        }
 718        close_candev(netdev);
 719
 720        return ret;
 721}
 722
/* Serialize @skb into the KCAN TDATA layout in @p.
 * header[0]: CAN id plus IDE/RTR flags; header[1]: DLC, FD flags,
 * abort-request, single-shot and the echo-slot sequence number.
 *
 * Returns the number of 32-bit data words the caller must push to the
 * Tx FIFO after the two header words.
 */
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;	/* echo slot doubles as HW sequence number */

	memset(p, 0, sizeof(*p));

	/* Single-shot: no automatic retransmission by the controller */
	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= cf->can_id & CAN_EFF_MASK;
	p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	}

	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	/* Data is pushed to the FIFO as whole 32-bit words */
	return DIV_ROUND_UP(packet_size, 4);
}
 761
/* net_device ndo_start_xmit: queue one frame into the channel Tx FIFO.
 * The skb is saved in an echo slot so Tx completion handling (outside
 * this chunk) can loop it back for accounting.
 */
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	/* echo_lock serializes echo-slot allocation and the FIFO writes */
	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}
 818
/* Program nominal (@data == false) or data-phase (@data == true) bit
 * timing. The controller must be in reset mode while BTRN/BTRD is
 * written; the previous mode is restored afterwards.
 *
 * Returns 0 on success, -EBUSY if reset mode is not reached in time.
 */
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	/* Pack tseg2/tseg1/sjw/brp into the BTRN/BTRD register layout */
	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
	       ((bt->brp - 1) & 0x1fff);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
				 0, 10);

	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);

	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq_flags);
	return 0;
}
 866
 867static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
 868{
 869        return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
 870}
 871
 872static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
 873{
 874        return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
 875}
 876
 877static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
 878{
 879        struct kvaser_pciefd_can *can = netdev_priv(ndev);
 880        int ret = 0;
 881
 882        switch (mode) {
 883        case CAN_MODE_START:
 884                if (!can->can.restart_ms)
 885                        ret = kvaser_pciefd_bus_on(can);
 886                break;
 887        default:
 888                return -EOPNOTSUPP;
 889        }
 890
 891        return ret;
 892}
 893
 894static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
 895                                          struct can_berr_counter *bec)
 896{
 897        struct kvaser_pciefd_can *can = netdev_priv(ndev);
 898
 899        bec->rxerr = can->bec.rxerr;
 900        bec->txerr = can->bec.txerr;
 901        return 0;
 902}
 903
/* Periodic error-counter poll, armed from the error handling paths.
 *
 * Re-enables error packet generation (it is disabled after
 * KVASER_PCIEFD_MAX_ERR_REP reports to throttle error storms), requests
 * a fresh status packet from the controller and resets the per-channel
 * error report budget.
 */
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}
 912
/* Net device callbacks shared by all channels on the card */
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
 919
/* Allocate and initialize one candev per channel on the card.
 *
 * Maps each channel's KCAN register window, validates the hardware Tx
 * FIFO depth and CAN FD capability, wires up the CAN core callbacks and
 * enables the per-channel interrupts. On failure the current netdev is
 * freed and an error returned; previously initialized channels are left
 * for the caller to clean up.
 */
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		/* Each channel has its own KCAN register window */
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		/* The hardware Tx FIFO must hold at least as many packets
		 * as the driver's echo skb array
		 */
		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		      0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		/* Nominal and data phase share the same timing constraints */
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		/* All channels are expected to be CAN FD capable */
		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		/* One-shot mode is only offered when the hardware reports
		 * the corresponding capability bit
		 */
		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		/* Clear any pending channel interrupts, then enable the
		 * ABD and TFD interrupt sources
		 */
		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}
1007
1008static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
1009{
1010        int i;
1011
1012        for (i = 0; i < pcie->nr_channels; i++) {
1013                int err = register_candev(pcie->can[i]->can.dev);
1014
1015                if (err) {
1016                        int j;
1017
1018                        /* Unregister all successfully registered devices. */
1019                        for (j = 0; j < i; j++)
1020                                unregister_candev(pcie->can[j]->can.dev);
1021                        return err;
1022                }
1023        }
1024
1025        return 0;
1026}
1027
1028static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
1029                                        dma_addr_t addr, int offset)
1030{
1031        u32 word1, word2;
1032
1033#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1034        word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
1035        word2 = addr >> 32;
1036#else
1037        word1 = addr;
1038        word2 = 0;
1039#endif
1040        iowrite32(word1, pcie->reg_base + offset);
1041        iowrite32(word2, pcie->reg_base + offset + 4);
1042}
1043
/* Allocate the Rx DMA buffers, program the on-card DMA map and enable DMA.
 *
 * Buffers are managed allocations (dmam_*), so they are released
 * automatically on driver detach. Returns 0 on success, -ENOMEM on
 * allocation failure, or -EIO if the SRB does not report idle right
 * before DMA is enabled.
 */
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		/* Each DMA map entry occupies 8 bytes (low/high word) */
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}
1087
/* Read and validate board-level configuration.
 *
 * Loads the configuration (which sets pcie->nr_channels), cross-checks
 * the channel count against the SYSID register, requires DMA-capable
 * hardware, and caches the bus and CAN clock frequencies used for
 * timestamp conversion.
 */
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	/* Clamp to the number of channels the driver supports */
	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	/* Divider for converting CAN clock ticks to microseconds; guard
	 * against a zero divider for clocks below 1 MHz
	 */
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}
1135
/* Deliver one received CAN / CAN FD data packet to the network stack.
 *
 * Decodes the packet headers into a (canfd) frame, copies the payload
 * for non-RTR frames, attaches the hardware timestamp and hands the skb
 * to netif_rx(). Returns -EIO for an invalid channel id, -ENOMEM if no
 * skb could be allocated, otherwise the netif_rx() result.
 */
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	struct net_device_stats *stats;
	struct skb_shared_hwtstamps *shhwtstamps;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	stats = &priv->dev->stats;

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		/* CAN FD frame: also carry over the BRS/ESI flags */
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}

		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;

		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}
	}

	cf->can_id = p->header[0] & CAN_EFF_MASK;
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);

	/* RTR frames carry no payload */
	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, data, cf->len);

	shhwtstamps = skb_hwtstamps(skb);

	/* Convert the hardware tick timestamp to nanoseconds */
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    pcie->freq_to_ticks_div));

	stats->rx_bytes += cf->len;
	stats->rx_packets++;

	return netif_rx(skb);
}
1195
/* Apply a CAN state transition and handle entry into bus off.
 *
 * Delegates the state/counter bookkeeping to can_change_state(). Note
 * that cf may be NULL here: kvaser_pciefd_rx_error_frame() calls this
 * before checking its skb allocation — relies on can_change_state()
 * tolerating a NULL frame.
 *
 * On bus off the Tx queue is stopped and, unless automatic restart is
 * configured (restart_ms != 0), a controller flush is started and the
 * bus off is reported to the CAN core.
 */
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		/* Stop the queue under can->lock to serialize against
		 * other paths touching the controller
		 */
		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* Prevent CAN controller from auto recover from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}
1219
1220static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
1221                                          struct can_berr_counter *bec,
1222                                          enum can_state *new_state,
1223                                          enum can_state *tx_state,
1224                                          enum can_state *rx_state)
1225{
1226        if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
1227            p->header[0] & KVASER_PCIEFD_SPACK_IRM)
1228                *new_state = CAN_STATE_BUS_OFF;
1229        else if (bec->txerr >= 255 ||  bec->rxerr >= 255)
1230                *new_state = CAN_STATE_BUS_OFF;
1231        else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
1232                *new_state = CAN_STATE_ERROR_PASSIVE;
1233        else if (bec->txerr >= 128 || bec->rxerr >= 128)
1234                *new_state = CAN_STATE_ERROR_PASSIVE;
1235        else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
1236                *new_state = CAN_STATE_ERROR_WARNING;
1237        else if (bec->txerr >= 96 || bec->rxerr >= 96)
1238                *new_state = CAN_STATE_ERROR_WARNING;
1239        else
1240                *new_state = CAN_STATE_ERROR_ACTIVE;
1241
1242        *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
1243        *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
1244}
1245
/* Process an error packet: update state and counters, emit an error frame.
 *
 * Derives the new CAN state from the packet and error counters, applies
 * a state change if needed, bumps the bus error statistics, caches the
 * counters for get_berr_counter, and — if an error skb could be
 * allocated — sends a CAN_ERR_BUSERROR frame carrying the counters and
 * the hardware timestamp.
 *
 * NOTE(review): cf may still be NULL when passed into
 * kvaser_pciefd_change_state(); this relies on can_change_state()
 * accepting a NULL frame.
 */
static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

	old_state = can->can.state;

	/* Tx/Rx error counters are packed into header[0] */
	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		/* Flag a restart after recovery from bus off */
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* Cache the counters for the get_berr_counter callback */
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    can->kv_pcie->freq_to_ticks_div));
	cf->can_id |= CAN_ERR_BUSERROR;

	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	netif_rx(skb);
	return 0;
}
1307
1308static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
1309                                             struct kvaser_pciefd_rx_packet *p)
1310{
1311        struct kvaser_pciefd_can *can;
1312        u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1313
1314        if (ch_id >= pcie->nr_channels)
1315                return -EIO;
1316
1317        can = pcie->can[ch_id];
1318
1319        kvaser_pciefd_rx_error_frame(can, p);
1320        if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
1321                /* Do not report more errors, until bec_poll_timer expires */
1322                kvaser_pciefd_disable_err_gen(can);
1323        /* Start polling the error counters */
1324        mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1325        return 0;
1326}
1327
/* Handle a response to an explicit status request.
 *
 * Updates the cached error counters, emits an error frame if the CAN
 * state changed, and re-arms the error counter poll timer while either
 * counter is non-zero. Returns -ENOMEM only if a state change could not
 * be reported due to skb allocation failure.
 */
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	/* Tx/Rx error counters are packed into header[0] */
	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		/* Flag a restart after recovery from bus off */
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	/* Cache the counters for the get_berr_counter callback */
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
1384
/* Demultiplex a status packet for one channel.
 *
 * Depending on the packet flags and the current controller status this
 * either continues the reset/abort/flush handshake, finishes a detected
 * reset, forwards an explicit status response, or completes the start
 * sequence when the bus came back on.
 */
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* The status register carries the last seen command sequence no. */
	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* Ack the ABD interrupt, then issue an abort command with
		 * the next command sequence number
		 */
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packet are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		/* Keep polling while in warning/passive state */
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}
1446
/* Handle an extended acknowledge packet.
 *
 * For flushed packets: if the Tx FIFO is now empty, issue the
 * end-of-flush command. Otherwise release the matching echo skb,
 * account the transmission and wake the Tx queue if it was stopped.
 */
static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
		/* The packet sequence number indexes the echo skb array */
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		struct net_device_stats *stats = &can->can.dev->stats;

		stats->tx_bytes += dlc;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}
1480
1481static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
1482                                             struct kvaser_pciefd_rx_packet *p)
1483{
1484        struct sk_buff *skb;
1485        struct net_device_stats *stats = &can->can.dev->stats;
1486        struct can_frame *cf;
1487
1488        skb = alloc_can_err_skb(can->can.dev, &cf);
1489
1490        stats->tx_errors++;
1491        if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
1492                if (skb)
1493                        cf->can_id |= CAN_ERR_LOSTARB;
1494                can->can.can_stats.arbitration_lost++;
1495        } else if (skb) {
1496                cf->can_id |= CAN_ERR_ACK;
1497        }
1498
1499        if (skb) {
1500                cf->can_id |= CAN_ERR_BUSERROR;
1501                stats->rx_bytes += cf->can_dlc;
1502                stats->rx_packets++;
1503                netif_rx(skb);
1504        } else {
1505                stats->rx_dropped++;
1506                netdev_warn(can->can.dev, "No memory left for err_skb\n");
1507        }
1508}
1509
/* Handle a transmit acknowledge packet.
 *
 * Control packet ACKs are ignored. A NACK is turned into an error frame
 * and marked as a one-shot failure so it is not counted as a successful
 * transmission. For non-flushed packets the echo skb is released, the
 * queue is woken once there is FIFO room again, and (unless the packet
 * was NACKed) the Tx statistics are updated.
 */
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		/* The packet sequence number indexes the echo skb array */
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}
1552
1553static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
1554                                              struct kvaser_pciefd_rx_packet *p)
1555{
1556        struct kvaser_pciefd_can *can;
1557        u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1558
1559        if (ch_id >= pcie->nr_channels)
1560                return -EIO;
1561
1562        can = pcie->can[ch_id];
1563
1564        if (!completion_done(&can->flush_comp))
1565                complete(&can->flush_comp);
1566
1567        return 0;
1568}
1569
/* Parse and dispatch one packet from an Rx DMA buffer.
 *
 * Packet layout: one size word, two header words, a 64-bit timestamp
 * and, for data packets, the payload words. On success *start_pos is
 * advanced to the next packet header; a zero size word marks the end of
 * the buffer and resets *start_pos to 0.
 *
 * Returns 0 on success (including end-of-buffer), a handler's negative
 * errno, or -EIO for an unknown packet type or a size/position mismatch
 * (corrupted packet size).
 */
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	/* A zero size word terminates the buffer */
	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		/* Skip over the payload words of non-RTR frames */
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_dlc2len(p->header[1] >>
					       KVASER_PCIEFD_RPACKET_DLC_SHIFT);
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}
1656
1657static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
1658{
1659        int pos = 0;
1660        int res = 0;
1661
1662        do {
1663                res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
1664        } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
1665
1666        return res;
1667}
1668
1669static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
1670{
1671        u32 irq;
1672
1673        irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1674        if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
1675                kvaser_pciefd_read_buffer(pcie, 0);
1676                /* Reset DMA buffer 0 */
1677                iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
1678                          pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1679        }
1680
1681        if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
1682                kvaser_pciefd_read_buffer(pcie, 1);
1683                /* Reset DMA buffer 1 */
1684                iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
1685                          pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1686        }
1687
1688        if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
1689            irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
1690            irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
1691            irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
1692                dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
1693
1694        iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1695        return 0;
1696}
1697
1698static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
1699{
1700        u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1701
1702        if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
1703                netdev_err(can->can.dev, "Tx FIFO overflow\n");
1704
1705        if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
1706                u8 count = ioread32(can->reg_base +
1707                                    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1708
1709                if (count == 0)
1710                        iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1711                                  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1712        }
1713
1714        if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
1715                netdev_err(can->can.dev,
1716                           "Fail to change bittiming, when not in reset mode\n");
1717
1718        if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
1719                netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");
1720
1721        if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
1722                netdev_err(can->can.dev, "Rx FIFO overflow\n");
1723
1724        iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1725        return 0;
1726}
1727
1728static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
1729{
1730        struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
1731        u32 board_irq;
1732        int i;
1733
1734        board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1735
1736        if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
1737                return IRQ_NONE;
1738
1739        if (board_irq & KVASER_PCIEFD_IRQ_SRB)
1740                kvaser_pciefd_receive_irq(pcie);
1741
1742        for (i = 0; i < pcie->nr_channels; i++) {
1743                if (!pcie->can[i]) {
1744                        dev_err(&pcie->pci->dev,
1745                                "IRQ mask points to unallocated controller\n");
1746                        break;
1747                }
1748
1749                /* Check that mask matches channel (i) IRQ mask */
1750                if (board_irq & (1 << i))
1751                        kvaser_pciefd_transmit_irq(pcie->can[i]);
1752        }
1753
1754        iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1755        return IRQ_HANDLED;
1756}
1757
1758static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
1759{
1760        int i;
1761        struct kvaser_pciefd_can *can;
1762
1763        for (i = 0; i < pcie->nr_channels; i++) {
1764                can = pcie->can[i];
1765                if (can) {
1766                        iowrite32(0,
1767                                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1768                        kvaser_pciefd_pwm_stop(can);
1769                        free_candev(can->can.dev);
1770                }
1771        }
1772}
1773
1774static int kvaser_pciefd_probe(struct pci_dev *pdev,
1775                               const struct pci_device_id *id)
1776{
1777        int err;
1778        struct kvaser_pciefd *pcie;
1779
1780        pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1781        if (!pcie)
1782                return -ENOMEM;
1783
1784        pci_set_drvdata(pdev, pcie);
1785        pcie->pci = pdev;
1786
1787        err = pci_enable_device(pdev);
1788        if (err)
1789                return err;
1790
1791        err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
1792        if (err)
1793                goto err_disable_pci;
1794
1795        pcie->reg_base = pci_iomap(pdev, 0, 0);
1796        if (!pcie->reg_base) {
1797                err = -ENOMEM;
1798                goto err_release_regions;
1799        }
1800
1801        err = kvaser_pciefd_setup_board(pcie);
1802        if (err)
1803                goto err_pci_iounmap;
1804
1805        err = kvaser_pciefd_setup_dma(pcie);
1806        if (err)
1807                goto err_pci_iounmap;
1808
1809        pci_set_master(pdev);
1810
1811        err = kvaser_pciefd_setup_can_ctrls(pcie);
1812        if (err)
1813                goto err_teardown_can_ctrls;
1814
1815        iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
1816                  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1817
1818        iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
1819                  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
1820                  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
1821                  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
1822
1823        /* Reset IRQ handling, expected to be off before */
1824        iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
1825                  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1826        iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
1827                  pcie->reg_base + KVASER_PCIEFD_IEN_REG);
1828
1829        /* Ready the DMA buffers */
1830        iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
1831                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1832        iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
1833                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1834
1835        err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
1836                          IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
1837        if (err)
1838                goto err_teardown_can_ctrls;
1839
1840        err = kvaser_pciefd_reg_candev(pcie);
1841        if (err)
1842                goto err_free_irq;
1843
1844        return 0;
1845
1846err_free_irq:
1847        free_irq(pcie->pci->irq, pcie);
1848
1849err_teardown_can_ctrls:
1850        kvaser_pciefd_teardown_can_ctrls(pcie);
1851        iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1852        pci_clear_master(pdev);
1853
1854err_pci_iounmap:
1855        pci_iounmap(pdev, pcie->reg_base);
1856
1857err_release_regions:
1858        pci_release_regions(pdev);
1859
1860err_disable_pci:
1861        pci_disable_device(pdev);
1862
1863        return err;
1864}
1865
1866static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
1867{
1868        struct kvaser_pciefd_can *can;
1869        int i;
1870
1871        for (i = 0; i < pcie->nr_channels; i++) {
1872                can = pcie->can[i];
1873                if (can) {
1874                        iowrite32(0,
1875                                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1876                        unregister_candev(can->can.dev);
1877                        del_timer(&can->bec_poll_timer);
1878                        kvaser_pciefd_pwm_stop(can);
1879                        free_candev(can->can.dev);
1880                }
1881        }
1882}
1883
/* PCI remove: tear down in reverse order of probe.
 *
 * Controllers are unregistered first, then hardware interrupt
 * generation is disabled before the handler is freed, and finally the
 * PCI resources are released. The order is intentional and must not be
 * rearranged.
 */
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	/* Ack any pending board interrupts and mask all sources */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Safe to free the handler now that the hardware is quiet */
	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1903
/* PCI driver glue: ties the ID table to the probe/remove callbacks.
 * module_pci_driver() generates the module init/exit boilerplate.
 */
static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd)
1912