/* linux/drivers/net/can/kvaser_pciefd.c */
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
   2/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
   3 * Parts of this driver are based on the following:
   4 *  - Kvaser linux pciefd driver (version 5.25)
   5 *  - PEAK linux canfd driver
   6 *  - Altera Avalon EPCS flash controller driver
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/module.h>
  11#include <linux/device.h>
  12#include <linux/pci.h>
  13#include <linux/can/dev.h>
  14#include <linux/timer.h>
  15#include <linux/netdevice.h>
  16#include <linux/crc32.h>
  17#include <linux/iopoll.h>
  18
  19MODULE_LICENSE("Dual BSD/GPL");
  20MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
  21MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
  22
  23#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
  24
  25#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
  26#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
  27#define KVASER_PCIEFD_MAX_ERR_REP 256
  28#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
  29#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
  30#define KVASER_PCIEFD_DMA_COUNT 2
  31
  32#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
  33#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
  34
  35#define KVASER_PCIEFD_VENDOR 0x1a07
  36#define KVASER_PCIEFD_4HS_ID 0x0d
  37#define KVASER_PCIEFD_2HS_ID 0x0e
  38#define KVASER_PCIEFD_HS_ID 0x0f
  39#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
  40#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
  41
  42/* PCIe IRQ registers */
  43#define KVASER_PCIEFD_IRQ_REG 0x40
  44#define KVASER_PCIEFD_IEN_REG 0x50
  45/* DMA map */
  46#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
  47/* Kvaser KCAN CAN controller registers */
  48#define KVASER_PCIEFD_KCAN0_BASE 0x10000
  49#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
  50#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
  51#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
  52#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
  53#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
  54#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
  55#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
  56#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
  57#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
  58#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
  59#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
  60#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
  61#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
  62#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
  63/* Loopback control register */
  64#define KVASER_PCIEFD_LOOP_REG 0x1f000
  65/* System identification and information registers */
  66#define KVASER_PCIEFD_SYSID_BASE 0x1f020
  67#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
  68#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
  69#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
  70#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
  71/* Shared receive buffer registers */
  72#define KVASER_PCIEFD_SRB_BASE 0x1f200
  73#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
  74#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
  75#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
  76#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
  77#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
  78/* EPCS flash controller registers */
  79#define KVASER_PCIEFD_SPI_BASE 0x1fc00
  80#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
  81#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
  82#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
  83#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
  84#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
  85
  86#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
  87#define KVASER_PCIEFD_IRQ_SRB BIT(4)
  88
  89#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
  90#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
  91#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
  92
  93/* Reset DMA buffer 0, 1 and FIFO offset */
  94#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
  95#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
  96#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
  97
  98/* DMA packet done, buffer 0 and 1 */
  99#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
 100#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
 101/* DMA overflow, buffer 0 and 1 */
 102#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
 103#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
 104/* DMA underflow, buffer 0 and 1 */
 105#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
 106#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
 107
 108/* DMA idle */
 109#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
 110/* DMA support */
 111#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
 112
 113/* DMA Enable */
 114#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
 115
 116/* EPCS flash controller definitions */
 117#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
 118#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
 119#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
 120#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
 121#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
 122#define KVASER_PCIEFD_CFG_SYS_VER 1
 123#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
 124#define KVASER_PCIEFD_SPI_TMT BIT(5)
 125#define KVASER_PCIEFD_SPI_TRDY BIT(6)
 126#define KVASER_PCIEFD_SPI_RRDY BIT(7)
 127#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
 128/* Commands for controlling the onboard flash */
 129#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
 130#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
 131#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
 132
 133/* Kvaser KCAN definitions */
 134#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
 135#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
 136
 137#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
 138/* Request status packet */
 139#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
 140/* Abort, flush and reset */
 141#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
 142
 143/* Tx FIFO unaligned read */
 144#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
 145/* Tx FIFO unaligned end */
 146#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
 147/* Bus parameter protection error */
 148#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
 149/* FDF bit when controller is in classic mode */
 150#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
 151/* Rx FIFO overflow */
 152#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
 153/* Abort done */
 154#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
 155/* Tx buffer flush done */
 156#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
 157/* Tx FIFO overflow */
 158#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
 159/* Tx FIFO empty */
 160#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
 161/* Transmitter unaligned */
 162#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
 163
 164#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
 165
 166#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
 167/* Abort request */
 168#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
 169/* Idle state. Controller in reset mode and no abort or flush pending */
 170#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
 171/* Bus off */
 172#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
 173/* Reset mode request */
 174#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
 175/* Controller in reset mode */
 176#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
 177/* Controller got one-shot capability */
 178#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
 179/* Controller got CAN FD capability */
 180#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
 181#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
 182        KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
 183        KVASER_PCIEFD_KCAN_STAT_IRM)
 184
 185/* Reset mode */
 186#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
 187/* Listen only mode */
 188#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
 189/* Error packet enable */
 190#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
 191/* CAN FD non-ISO */
 192#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
 193/* Acknowledgment packet type */
 194#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
 195/* Active error flag enable. Clear to force error passive */
 196#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
 197/* Classic CAN mode */
 198#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
 199
 200#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
 201#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
 202#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
 203
 204#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
 205
 206/* Kvaser KCAN packet types */
 207#define KVASER_PCIEFD_PACK_TYPE_DATA 0
 208#define KVASER_PCIEFD_PACK_TYPE_ACK 1
 209#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
 210#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
 211#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
 212#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
 213#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
 214#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
 215#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
 216
 217/* Kvaser KCAN packet common definitions */
 218#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
 219#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
 220#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
 221
 222/* Kvaser KCAN TDATA and RDATA first word */
 223#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
 224#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
 225/* Kvaser KCAN TDATA and RDATA second word */
 226#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
 227#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
 228#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
 229#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
 230/* Kvaser KCAN TDATA second word */
 231#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
 232#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
 233
 234/* Kvaser KCAN APACKET */
 235#define KVASER_PCIEFD_APACKET_FLU BIT(8)
 236#define KVASER_PCIEFD_APACKET_CT BIT(9)
 237#define KVASER_PCIEFD_APACKET_ABL BIT(10)
 238#define KVASER_PCIEFD_APACKET_NACK BIT(11)
 239
 240/* Kvaser KCAN SPACK first word */
 241#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
 242#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
 243#define KVASER_PCIEFD_SPACK_IDET BIT(20)
 244#define KVASER_PCIEFD_SPACK_IRM BIT(21)
 245#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
 246/* Kvaser KCAN SPACK second word */
 247#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
 248#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
 249#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
 250
struct kvaser_pciefd;

/* Per-channel state: one instance for each KCAN controller on the board */
struct kvaser_pciefd_can {
	struct can_priv can;	/* Common CAN device state (candev); keep first */
	struct kvaser_pciefd *kv_pcie;	/* Back-pointer to the owning board */
	void __iomem *reg_base;	/* This channel's KCAN register window */
	struct can_berr_counter bec;	/* Most recent tx/rx error counters */
	u8 cmd_seq;	/* Sequence number stamped into KCAN_CMD writes */
	int err_rep_cnt;	/* Error reports since bus-on (reset in bus_on) */
	int echo_idx;	/* Next free echo skb slot (wraps at echo_skb_max) */
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	/* Polls error counters; rearmed at KVASER_PCIEFD_BEC_POLL_FREQ */
	struct timer_list bec_poll_timer;
	/* Waited on in bus_on/stop; presumably completed by the ISR on
	 * status / flush-ack packets — confirm against the IRQ handler
	 */
	struct completion start_comp, flush_comp;
};
 266
/* Per-board state: one instance per PCIe device */
struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;	/* Mapped PCIe register window */
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];	/* Shared RX DMA buffers */
	u8 nr_channels;	/* Channel count read from the config flash */
	u32 bus_freq;	/* System bus clock in Hz; used for PWM setup */
	u32 freq;	/* CAN controller clock in Hz — presumably from
			 * SYSID_CANFREQ_REG; confirm in probe code
			 */
	u32 freq_to_ticks_div;	/* Divider for timestamp conversion —
				 * usage not visible here; confirm
				 */
};
 277
/* Received KCAN packet: two header words plus a hardware timestamp */
struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};
 282
/* Outgoing KCAN TDATA packet: two header words plus up to 64 data bytes */
struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};
 287
/* Bit timing limits of the KCAN controller — presumably shared by the
 * nominal and data phases; confirm where can_priv is initialized
 */
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};
 299
/* One parameter record in the on-flash configuration image.
 * All fields are little-endian as stored in flash.
 */
struct kvaser_pciefd_cfg_param {
	__le32 magic;	/* Per-parameter magic/validity marker */
	__le32 nr;	/* Parameter number */
	__le32 len;	/* Number of valid bytes in data[] */
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};
 306
/* Configuration image layout as stored in the EPCS flash.
 * crc covers params[] only (see kvaser_pciefd_cfg_read_and_verify()).
 */
struct kvaser_pciefd_cfg_img {
	__le32 version;	/* Must equal KVASER_PCIEFD_CFG_SYS_VER */
	__le32 magic;	/* Must equal KVASER_PCIEFD_CFG_MAGIC */
	__le32 crc;	/* crc32_be over params[], bit-inverted */
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};
 313
 314static struct pci_device_id kvaser_pciefd_id_table[] = {
 315        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
 316        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
 317        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
 318        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
 319        { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
 320        { 0,},
 321};
 322MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
 323
 324/* Onboard flash memory functions */
 325static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
 326{
 327        u32 res;
 328        int ret;
 329
 330        ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
 331                                 res, res & msk, 0, 10);
 332
 333        return ret;
 334}
 335
/* Run one SPI transaction against the EPCS flash: clock out @tx_len
 * command/address bytes, then clock in @rx_len response bytes (by
 * writing dummy zero bytes to generate SPI clocks).
 * Returns 0 on success, -EIO if any status poll times out.
 */
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	/* Select slave 0; BIT(10) presumably holds chip-select asserted
	 * for the whole transfer (Altera SPI core SSO) — confirm
	 */
	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	/* Drain any stale byte from the receive register */
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		/* Wait until the transmit register can accept a byte */
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		/* Every byte shifted out also shifts one in; discard it */
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c-- > 0) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		/* Dummy write to clock the next response byte out of flash */
		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	/* Let the shift register fully drain before deselecting */
	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	/* NOTE(review): the rx loop above always exits with c == -1
	 * (early timeouts return before this point), so this check
	 * looks vestigial — confirm before removing
	 */
	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}
 383
 384static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
 385                                             struct kvaser_pciefd_cfg_img *img)
 386{
 387        int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
 388        int res, crc;
 389        u8 *crc_buff;
 390
 391        u8 cmd[] = {
 392                KVASER_PCIEFD_FLASH_READ_CMD,
 393                (u8)((offset >> 16) & 0xff),
 394                (u8)((offset >> 8) & 0xff),
 395                (u8)(offset & 0xff)
 396        };
 397
 398        res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
 399                                    KVASER_PCIEFD_CFG_IMG_SZ);
 400        if (res)
 401                return res;
 402
 403        crc_buff = (u8 *)img->params;
 404
 405        if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
 406                dev_err(&pcie->pci->dev,
 407                        "Config flash corrupted, version number is wrong\n");
 408                return -ENODEV;
 409        }
 410
 411        if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
 412                dev_err(&pcie->pci->dev,
 413                        "Config flash corrupted, magic number is wrong\n");
 414                return -ENODEV;
 415        }
 416
 417        crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
 418        if (le32_to_cpu(img->crc) != crc) {
 419                dev_err(&pcie->pci->dev,
 420                        "Stored CRC does not match flash image contents\n");
 421                return -EIO;
 422        }
 423
 424        return 0;
 425}
 426
 427static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
 428                                          struct kvaser_pciefd_cfg_img *img)
 429{
 430        struct kvaser_pciefd_cfg_param *param;
 431
 432        param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
 433        memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
 434}
 435
 436static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
 437{
 438        int res;
 439        struct kvaser_pciefd_cfg_img *img;
 440
 441        /* Read electronic signature */
 442        u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
 443
 444        res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
 445        if (res)
 446                return -EIO;
 447
 448        img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
 449        if (!img)
 450                return -ENOMEM;
 451
 452        if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
 453                dev_err(&pcie->pci->dev,
 454                        "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
 455                        cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
 456
 457                res = -ENODEV;
 458                goto image_free;
 459        }
 460
 461        cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
 462        res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
 463        if (res) {
 464                goto image_free;
 465        } else if (cmd[0] & 1) {
 466                res = -EIO;
 467                /* No write is ever done, the WIP should never be set */
 468                dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
 469                goto image_free;
 470        }
 471
 472        res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
 473        if (res) {
 474                res = -EIO;
 475                goto image_free;
 476        }
 477
 478        kvaser_pciefd_cfg_read_params(pcie, img);
 479
 480image_free:
 481        kfree(img);
 482        return res;
 483}
 484
 485static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
 486{
 487        u32 cmd;
 488
 489        cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
 490        cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
 491        iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
 492}
 493
 494static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
 495{
 496        u32 mode;
 497        unsigned long irq;
 498
 499        spin_lock_irqsave(&can->lock, irq);
 500        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 501        if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
 502                mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
 503                iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 504        }
 505        spin_unlock_irqrestore(&can->lock, irq);
 506}
 507
 508static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
 509{
 510        u32 mode;
 511        unsigned long irq;
 512
 513        spin_lock_irqsave(&can->lock, irq);
 514        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 515        mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
 516        iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 517        spin_unlock_irqrestore(&can->lock, irq);
 518}
 519
 520static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
 521{
 522        u32 msk;
 523
 524        msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
 525              KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
 526              KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
 527              KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
 528              KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
 529
 530        iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 531
 532        return 0;
 533}
 534
 535static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
 536{
 537        u32 mode;
 538        unsigned long irq;
 539
 540        spin_lock_irqsave(&can->lock, irq);
 541
 542        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 543        if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
 544                mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
 545                if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
 546                        mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
 547                else
 548                        mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
 549        } else {
 550                mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
 551                mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
 552        }
 553
 554        if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 555                mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
 556
 557        mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
 558        mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
 559        /* Use ACK packet type */
 560        mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
 561        mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
 562        iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
 563
 564        spin_unlock_irqrestore(&can->lock, irq);
 565}
 566
/* Kick off an abort/flush/reset cycle on the controller. This only
 * starts the operation; callers wait on can->flush_comp, which is
 * presumably completed from the IRQ handler on ABD/TFD — confirm
 * against the ISR (not in this chunk).
 */
static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	/* Ack all pending interrupts, then enable only abort-done and
	 * tx-flush-done so the completion can be signalled
	 */
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* If controller is already idle, run abort, flush and reset */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	/* else: a reset-mode request is already pending; nothing to do */

	spin_unlock_irqrestore(&can->lock, irq);
}
 596
/* Bring the controller to bus-on: flush any pending traffic, take the
 * controller out of reset mode, wait for it to come up, then restore
 * the normal interrupt mask and mode configuration.
 * Returns 0 on success, -ETIMEDOUT if flush or start never completes.
 */
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);

	/* Don't restart a flush that is already in flight */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	/* Mask and ack everything, then enable only the interrupts
	 * needed to observe the start sequence
	 */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	/* Leave reset mode */
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	/* Fresh bus-on: clear error bookkeeping */
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}
 645
/* Set the transceiver PWM duty cycle to zero by programming the
 * trigger field equal to the TOP field.
 */
static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;

	/* Set duty cycle to zero */
	/* NOTE(review): OR-ing top into the low (trigger) byte only
	 * yields trigger == top if the current trigger bits are a
	 * subset of top — appears to rely on values programmed by
	 * kvaser_pciefd_pwm_start(); confirm
	 */
	pwm_ctrl |= top;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
 661
/* Start the transceiver PWM at 500 kHz with a 95% duty cycle.
 * The TOP field is written first with zero duty, then the trigger
 * value is programmed in a second write.
 */
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Set frequency to 500 KHz*/
	/* f_pwm = bus_freq / (2 * (top + 1)) */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	/* trigger == top means 0% duty at this point */
	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95 */
	/* Rounded computation of the trigger value giving ~95% duty */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
 685
 686static int kvaser_pciefd_open(struct net_device *netdev)
 687{
 688        int err;
 689        struct kvaser_pciefd_can *can = netdev_priv(netdev);
 690
 691        err = open_candev(netdev);
 692        if (err)
 693                return err;
 694
 695        err = kvaser_pciefd_bus_on(can);
 696        if (err) {
 697                close_candev(netdev);
 698                return err;
 699        }
 700
 701        return 0;
 702}
 703
 704static int kvaser_pciefd_stop(struct net_device *netdev)
 705{
 706        struct kvaser_pciefd_can *can = netdev_priv(netdev);
 707        int ret = 0;
 708
 709        /* Don't interrupt ongoing flush */
 710        if (!completion_done(&can->flush_comp))
 711                kvaser_pciefd_start_controller_flush(can);
 712
 713        if (!wait_for_completion_timeout(&can->flush_comp,
 714                                         KVASER_PCIEFD_WAIT_TIMEOUT)) {
 715                netdev_err(can->can.dev, "Timeout during stop\n");
 716                ret = -ETIMEDOUT;
 717        } else {
 718                iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 719                del_timer(&can->bec_poll_timer);
 720        }
 721        close_candev(netdev);
 722
 723        return ret;
 724}
 725
/* Encode an outgoing skb into a KCAN TDATA packet in *p.
 * header[0]: CAN ID plus IDE/RTR flags; header[1]: DLC, FD flags,
 * ack-request and the echo-slot sequence number.
 * Returns the number of 32-bit data words that follow the header.
 */
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	/* Sequence number = the echo slot this skb will occupy, so the
	 * ack can be matched back to it
	 */
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));

	/* Single-shot mode when one-shot is requested */
	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= cf->can_id & CAN_EFF_MASK;
	/* NOTE(review): can_fd_len2dlc() is used for classic frames
	 * too; for len <= 8 this matches the classic DLC — confirm
	 */
	p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	}

	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	/* Payload is written to the FIFO in whole 32-bit words */
	return DIV_ROUND_UP(packet_size, 4);
}
 764
/* ndo_start_xmit: encode the skb, store it in the echo buffer and push
 * it into the TX FIFO. A write to FIFO_LAST_REG terminates the packet.
 * The echo bookkeeping and FIFO writes are serialized by echo_lock.
 * Always returns NETDEV_TX_OK; the queue is stopped when the FIFO or
 * echo slots are exhausted.
 */
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	/* Low bits of NPACKETS hold the number of queued packets —
	 * presumably; only the low byte is used here (u8 count)
	 */
	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}
 821
/* Program nominal (@data == false) or data-phase (@data == true) bit
 * timing. The controller must be in reset mode while the BTR register
 * is written; the previous mode is restored afterwards.
 * Returns 0 on success, -EBUSY if reset mode is never reached.
 */
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	/* Pack TSEG2/TSEG1/SJW/BRP into the BTR register layout; all
	 * fields are encoded as value - 1
	 */
	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
	       ((bt->brp - 1) & 0x1fff);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
				 0, 10);

	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);

	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq_flags);
	return 0;
}
 869
 870static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
 871{
 872        return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
 873}
 874
 875static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
 876{
 877        return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
 878}
 879
 880static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
 881{
 882        struct kvaser_pciefd_can *can = netdev_priv(ndev);
 883        int ret = 0;
 884
 885        switch (mode) {
 886        case CAN_MODE_START:
 887                if (!can->can.restart_ms)
 888                        ret = kvaser_pciefd_bus_on(can);
 889                break;
 890        default:
 891                return -EOPNOTSUPP;
 892        }
 893
 894        return ret;
 895}
 896
 897static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
 898                                          struct can_berr_counter *bec)
 899{
 900        struct kvaser_pciefd_can *can = netdev_priv(ndev);
 901
 902        bec->rxerr = can->bec.rxerr;
 903        bec->txerr = can->bec.txerr;
 904        return 0;
 905}
 906
/* Periodic poll of the bus error counters.
 *
 * Re-enables error packet generation (disabled after a burst of
 * KVASER_PCIEFD_MAX_ERR_REP error packets), requests a fresh status
 * packet from the controller and resets the error report counter.
 */
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}
 915
/* Netdev callbacks shared by all channels of the board */
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
 922
/* Allocate and initialize one candev per channel on the board.
 *
 * For each channel: allocate the netdev, map its per-channel KCAN
 * register window, sanity check the hardware Tx FIFO depth and CAN FD
 * capability, wire up the driver callbacks and enable the ABD/TFD
 * interrupts. On failure the current netdev is freed and an error is
 * returned; already-initialized channels remain in pcie->can[] for the
 * caller to clean up.
 *
 * Return: 0 on success, -ENOMEM or -ENODEV on failure.
 */
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		/* Each channel has its own KCAN register window */
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		/* Verify that the hardware Tx FIFO is at least as deep as
		 * the driver's echo_skb array expects.
		 */
		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		      0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		/* Nominal and data phase share the same timing constants */
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		/* This driver only supports CAN FD capable hardware */
		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		/* Clear all pending channel interrupts, then enable the
		 * abort-done and Tx-FIFO-done interrupts.
		 */
		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}
1013
1014static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
1015{
1016        int i;
1017
1018        for (i = 0; i < pcie->nr_channels; i++) {
1019                int err = register_candev(pcie->can[i]->can.dev);
1020
1021                if (err) {
1022                        int j;
1023
1024                        /* Unregister all successfully registered devices. */
1025                        for (j = 0; j < i; j++)
1026                                unregister_candev(pcie->can[j]->can.dev);
1027                        return err;
1028                }
1029        }
1030
1031        return 0;
1032}
1033
1034static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
1035                                        dma_addr_t addr, int offset)
1036{
1037        u32 word1, word2;
1038
1039#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1040        word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
1041        word2 = addr >> 32;
1042#else
1043        word1 = addr;
1044        word2 = 0;
1045#endif
1046        iowrite32(word1, pcie->reg_base + offset);
1047        iowrite32(word2, pcie->reg_base + offset + 4);
1048}
1049
/* Allocate the Rx DMA buffers, program their bus addresses into the
 * board's DMA map and enable the DMA engine.
 *
 * The buffers are managed (dmam_*) allocations, so they are released
 * automatically when the PCI device is unbound.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO if the
 * DMA engine does not report idle before being enabled.
 */
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		/* Each map entry is two 32-bit words (8 bytes) */
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	/* The DMA engine must be idle before it may be enabled */
	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}
1093
/* Probe and validate board-level parameters.
 *
 * Reads the flash configuration, cross-checks the channel count
 * against the SYSID block, verifies DMA support, and caches the bus
 * and CAN clock frequencies used for timestamp conversion.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	/* The channel count from flash must match the SYSID register */
	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	/* Clamp to what the driver supports */
	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	/* Ticks-per-microsecond divisor for hardware timestamps; guard
	 * against a zero divisor for clocks below 1 MHz.
	 */
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}
1141
1142static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
1143                                            struct kvaser_pciefd_rx_packet *p,
1144                                            __le32 *data)
1145{
1146        struct sk_buff *skb;
1147        struct canfd_frame *cf;
1148        struct can_priv *priv;
1149        struct net_device_stats *stats;
1150        struct skb_shared_hwtstamps *shhwtstamps;
1151        u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1152
1153        if (ch_id >= pcie->nr_channels)
1154                return -EIO;
1155
1156        priv = &pcie->can[ch_id]->can;
1157        stats = &priv->dev->stats;
1158
1159        if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
1160                skb = alloc_canfd_skb(priv->dev, &cf);
1161                if (!skb) {
1162                        stats->rx_dropped++;
1163                        return -ENOMEM;
1164                }
1165
1166                if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
1167                        cf->flags |= CANFD_BRS;
1168
1169                if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
1170                        cf->flags |= CANFD_ESI;
1171        } else {
1172                skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
1173                if (!skb) {
1174                        stats->rx_dropped++;
1175                        return -ENOMEM;
1176                }
1177        }
1178
1179        cf->can_id = p->header[0] & CAN_EFF_MASK;
1180        if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
1181                cf->can_id |= CAN_EFF_FLAG;
1182
1183        cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
1184
1185        if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
1186                cf->can_id |= CAN_RTR_FLAG;
1187        else
1188                memcpy(cf->data, data, cf->len);
1189
1190        shhwtstamps = skb_hwtstamps(skb);
1191
1192        shhwtstamps->hwtstamp =
1193                ns_to_ktime(div_u64(p->timestamp * 1000,
1194                                    pcie->freq_to_ticks_div));
1195
1196        stats->rx_bytes += cf->len;
1197        stats->rx_packets++;
1198
1199        return netif_rx(skb);
1200}
1201
/* Apply a CAN state transition for a channel.
 *
 * Delegates the generic bookkeeping to can_change_state(). On a
 * transition to bus off the Tx queue is stopped and, unless automatic
 * restart is configured (restart_ms != 0), a controller flush is
 * started and the stack is notified via can_bus_off().
 *
 * @cf may be NULL when the caller failed to allocate an error skb (see
 * kvaser_pciefd_rx_error_frame()).
 */
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		/* Stop the queue under can->lock to serialize against the
		 * Tx path.
		 */
		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* Prevent CAN controller from auto recover from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}
1225
1226static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
1227                                          struct can_berr_counter *bec,
1228                                          enum can_state *new_state,
1229                                          enum can_state *tx_state,
1230                                          enum can_state *rx_state)
1231{
1232        if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
1233            p->header[0] & KVASER_PCIEFD_SPACK_IRM)
1234                *new_state = CAN_STATE_BUS_OFF;
1235        else if (bec->txerr >= 255 ||  bec->rxerr >= 255)
1236                *new_state = CAN_STATE_BUS_OFF;
1237        else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
1238                *new_state = CAN_STATE_ERROR_PASSIVE;
1239        else if (bec->txerr >= 128 || bec->rxerr >= 128)
1240                *new_state = CAN_STATE_ERROR_PASSIVE;
1241        else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
1242                *new_state = CAN_STATE_ERROR_WARNING;
1243        else if (bec->txerr >= 96 || bec->rxerr >= 96)
1244                *new_state = CAN_STATE_ERROR_WARNING;
1245        else
1246                *new_state = CAN_STATE_ERROR_ACTIVE;
1247
1248        *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
1249        *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
1250}
1251
1252static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
1253                                        struct kvaser_pciefd_rx_packet *p)
1254{
1255        struct can_berr_counter bec;
1256        enum can_state old_state, new_state, tx_state, rx_state;
1257        struct net_device *ndev = can->can.dev;
1258        struct sk_buff *skb;
1259        struct can_frame *cf = NULL;
1260        struct skb_shared_hwtstamps *shhwtstamps;
1261        struct net_device_stats *stats = &ndev->stats;
1262
1263        old_state = can->can.state;
1264
1265        bec.txerr = p->header[0] & 0xff;
1266        bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
1267
1268        kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
1269                                      &rx_state);
1270
1271        skb = alloc_can_err_skb(ndev, &cf);
1272
1273        if (new_state != old_state) {
1274                kvaser_pciefd_change_state(can, cf, new_state, tx_state,
1275                                           rx_state);
1276
1277                if (old_state == CAN_STATE_BUS_OFF &&
1278                    new_state == CAN_STATE_ERROR_ACTIVE &&
1279                    can->can.restart_ms) {
1280                        can->can.can_stats.restarts++;
1281                        if (skb)
1282                                cf->can_id |= CAN_ERR_RESTARTED;
1283                }
1284        }
1285
1286        can->err_rep_cnt++;
1287        can->can.can_stats.bus_error++;
1288        stats->rx_errors++;
1289
1290        can->bec.txerr = bec.txerr;
1291        can->bec.rxerr = bec.rxerr;
1292
1293        if (!skb) {
1294                stats->rx_dropped++;
1295                return -ENOMEM;
1296        }
1297
1298        shhwtstamps = skb_hwtstamps(skb);
1299        shhwtstamps->hwtstamp =
1300                ns_to_ktime(div_u64(p->timestamp * 1000,
1301                                    can->kv_pcie->freq_to_ticks_div));
1302        cf->can_id |= CAN_ERR_BUSERROR;
1303
1304        cf->data[6] = bec.txerr;
1305        cf->data[7] = bec.rxerr;
1306
1307        stats->rx_packets++;
1308        stats->rx_bytes += cf->len;
1309
1310        netif_rx(skb);
1311        return 0;
1312}
1313
1314static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
1315                                             struct kvaser_pciefd_rx_packet *p)
1316{
1317        struct kvaser_pciefd_can *can;
1318        u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1319
1320        if (ch_id >= pcie->nr_channels)
1321                return -EIO;
1322
1323        can = pcie->can[ch_id];
1324
1325        kvaser_pciefd_rx_error_frame(can, p);
1326        if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
1327                /* Do not report more errors, until bec_poll_timer expires */
1328                kvaser_pciefd_disable_err_gen(can);
1329        /* Start polling the error counters */
1330        mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1331        return 0;
1332}
1333
/* Process a status packet that answers a status request.
 *
 * Extracts the error counters, derives the new CAN state and, on a
 * state change, generates an error frame for the stack. The cached
 * counters for do_get_berr_counter are always refreshed, and counter
 * polling is (re)armed while either counter is non-zero.
 *
 * Return: 0 on success, -ENOMEM if a needed error skb could not be
 * allocated.
 */
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		/* Leaving bus off with automatic restart configured counts
		 * as a restart.
		 */
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
1390
/* Dispatch a status packet from the controller.
 *
 * Distinguishes between spontaneous status packets generated during
 * the reset/flush handshake, responses to explicit status requests
 * (non-AUTO with matching sequence number), and bus-on notifications,
 * and drives the corresponding state machine step for the channel.
 */
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* Ack the abort-done interrupt, issue an abort command
		 * with the next sequence number and arm the Tx-FIFO-done
		 * interrupt for the flush.
		 */
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packet are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		/* Keep polling while not in a stable state */
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}
1452
/* Handle an extended acknowledge packet.
 *
 * For the final flushed packet, signal end-of-flush to the controller
 * once the Tx FIFO is empty. For a regular extended ack, release the
 * matching echo skb, account the transmission and wake the Tx queue
 * if it was stopped.
 */
static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
		/* The sequence number in the ack identifies the echo slot */
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		struct net_device_stats *stats = &can->can.dev->stats;

		stats->tx_bytes += dlc;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}
1486
1487static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
1488                                             struct kvaser_pciefd_rx_packet *p)
1489{
1490        struct sk_buff *skb;
1491        struct net_device_stats *stats = &can->can.dev->stats;
1492        struct can_frame *cf;
1493
1494        skb = alloc_can_err_skb(can->can.dev, &cf);
1495
1496        stats->tx_errors++;
1497        if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
1498                if (skb)
1499                        cf->can_id |= CAN_ERR_LOSTARB;
1500                can->can.can_stats.arbitration_lost++;
1501        } else if (skb) {
1502                cf->can_id |= CAN_ERR_ACK;
1503        }
1504
1505        if (skb) {
1506                cf->can_id |= CAN_ERR_BUSERROR;
1507                stats->rx_bytes += cf->len;
1508                stats->rx_packets++;
1509                netif_rx(skb);
1510        } else {
1511                stats->rx_dropped++;
1512                netdev_warn(can->can.dev, "No memory left for err_skb\n");
1513        }
1514}
1515
/* Handle a transmit acknowledge packet.
 *
 * Control packet acks are ignored. NACKs are turned into error frames
 * via kvaser_pciefd_handle_nack_packet(); the transmission is then
 * only counted as successful if it was not a one-shot failure. The
 * matching echo skb is released and the Tx queue is woken when the
 * hardware FIFO has room again.
 */
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		/* The sequence number in the ack identifies the echo slot */
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}
1558
1559static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
1560                                              struct kvaser_pciefd_rx_packet *p)
1561{
1562        struct kvaser_pciefd_can *can;
1563        u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1564
1565        if (ch_id >= pcie->nr_channels)
1566                return -EIO;
1567
1568        can = pcie->can[ch_id];
1569
1570        if (!completion_done(&can->flush_comp))
1571                complete(&can->flush_comp);
1572
1573        return 0;
1574}
1575
/* Parse and dispatch one packet from an Rx DMA buffer.
 *
 * Each packet is laid out as: a 32-bit size word, two 32-bit header
 * words, a 64-bit timestamp, and (for data packets) the payload words.
 * @start_pos is advanced past the packet on success, or reset to 0
 * when a zero size word marks the buffer as drained.
 *
 * Return: 0 on success (including end of buffer), -EIO on an invalid
 * channel id, unknown packet type or a size word that disagrees with
 * the parsed length, or a negative errno from a packet handler.
 */
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	/* A zero size word means no more packets in this buffer */
	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		/* Skip the payload words; RTR packets carry none */
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_fd_dlc2len(p->header[1] >>
					       KVASER_PCIEFD_RPACKET_DLC_SHIFT);
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}
1662
1663static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
1664{
1665        int pos = 0;
1666        int res = 0;
1667
1668        do {
1669                res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
1670        } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
1671
1672        return res;
1673}
1674
1675static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
1676{
1677        u32 irq;
1678
1679        irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1680        if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
1681                kvaser_pciefd_read_buffer(pcie, 0);
1682                /* Reset DMA buffer 0 */
1683                iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
1684                          pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1685        }
1686
1687        if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
1688                kvaser_pciefd_read_buffer(pcie, 1);
1689                /* Reset DMA buffer 1 */
1690                iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
1691                          pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1692        }
1693
1694        if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
1695            irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
1696            irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
1697            irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
1698                dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
1699
1700        iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1701        return 0;
1702}
1703
1704static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
1705{
1706        u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1707
1708        if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
1709                netdev_err(can->can.dev, "Tx FIFO overflow\n");
1710
1711        if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
1712                u8 count = ioread32(can->reg_base +
1713                                    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1714
1715                if (count == 0)
1716                        iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1717                                  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1718        }
1719
1720        if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
1721                netdev_err(can->can.dev,
1722                           "Fail to change bittiming, when not in reset mode\n");
1723
1724        if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
1725                netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");
1726
1727        if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
1728                netdev_err(can->can.dev, "Rx FIFO overflow\n");
1729
1730        iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1731        return 0;
1732}
1733
/* Top-level PCIe interrupt handler (registered with IRQF_SHARED in probe).
 *
 * Reads the board interrupt status and returns IRQ_NONE when none of this
 * driver's bits (KVASER_PCIEFD_IRQ_ALL_MSK) are set, so a shared line is
 * passed on to other handlers. Otherwise it services the shared receive
 * buffer (SRB) and every CAN channel whose per-channel bit is set, then
 * writes the observed bits back to the IRQ register.
 */
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		/* A set channel bit for an unallocated controller indicates a
		 * setup inconsistency; stop scanning rather than dereference.
		 */
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	/* NOTE(review): presumably this write acknowledges the handled bits
	 * in the board IRQ register — confirm against the hardware docs.
	 */
	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	return IRQ_HANDLED;
}
1763
1764static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
1765{
1766        int i;
1767        struct kvaser_pciefd_can *can;
1768
1769        for (i = 0; i < pcie->nr_channels; i++) {
1770                can = pcie->can[i];
1771                if (can) {
1772                        iowrite32(0,
1773                                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1774                        kvaser_pciefd_pwm_stop(can);
1775                        free_candev(can->can.dev);
1776                }
1777        }
1778}
1779
1780static int kvaser_pciefd_probe(struct pci_dev *pdev,
1781                               const struct pci_device_id *id)
1782{
1783        int err;
1784        struct kvaser_pciefd *pcie;
1785
1786        pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1787        if (!pcie)
1788                return -ENOMEM;
1789
1790        pci_set_drvdata(pdev, pcie);
1791        pcie->pci = pdev;
1792
1793        err = pci_enable_device(pdev);
1794        if (err)
1795                return err;
1796
1797        err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
1798        if (err)
1799                goto err_disable_pci;
1800
1801        pcie->reg_base = pci_iomap(pdev, 0, 0);
1802        if (!pcie->reg_base) {
1803                err = -ENOMEM;
1804                goto err_release_regions;
1805        }
1806
1807        err = kvaser_pciefd_setup_board(pcie);
1808        if (err)
1809                goto err_pci_iounmap;
1810
1811        err = kvaser_pciefd_setup_dma(pcie);
1812        if (err)
1813                goto err_pci_iounmap;
1814
1815        pci_set_master(pdev);
1816
1817        err = kvaser_pciefd_setup_can_ctrls(pcie);
1818        if (err)
1819                goto err_teardown_can_ctrls;
1820
1821        iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
1822                  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1823
1824        iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
1825                  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
1826                  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
1827                  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
1828
1829        /* Reset IRQ handling, expected to be off before */
1830        iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
1831                  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1832        iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
1833                  pcie->reg_base + KVASER_PCIEFD_IEN_REG);
1834
1835        /* Ready the DMA buffers */
1836        iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
1837                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1838        iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
1839                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1840
1841        err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
1842                          IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
1843        if (err)
1844                goto err_teardown_can_ctrls;
1845
1846        err = kvaser_pciefd_reg_candev(pcie);
1847        if (err)
1848                goto err_free_irq;
1849
1850        return 0;
1851
1852err_free_irq:
1853        free_irq(pcie->pci->irq, pcie);
1854
1855err_teardown_can_ctrls:
1856        kvaser_pciefd_teardown_can_ctrls(pcie);
1857        iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1858        pci_clear_master(pdev);
1859
1860err_pci_iounmap:
1861        pci_iounmap(pdev, pcie->reg_base);
1862
1863err_release_regions:
1864        pci_release_regions(pdev);
1865
1866err_disable_pci:
1867        pci_disable_device(pdev);
1868
1869        return err;
1870}
1871
1872static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
1873{
1874        struct kvaser_pciefd_can *can;
1875        int i;
1876
1877        for (i = 0; i < pcie->nr_channels; i++) {
1878                can = pcie->can[i];
1879                if (can) {
1880                        iowrite32(0,
1881                                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1882                        unregister_candev(can->can.dev);
1883                        del_timer(&can->bec_poll_timer);
1884                        kvaser_pciefd_pwm_stop(can);
1885                        free_candev(can->can.dev);
1886                }
1887        }
1888}
1889
/* PCI remove callback: tear down all channel controllers, quiesce the
 * board's interrupt generation, release the IRQ and all PCI resources.
 * The ordering matters: interrupt sources are silenced before free_irq(),
 * and the BAR stays mapped until all register writes are done.
 */
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1909
/* PCI driver glue: binds kvaser_pciefd_id_table devices to the
 * probe/remove callbacks above. module_pci_driver() generates the module
 * init/exit boilerplate.
 */
static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd)
1918