linux/drivers/platform/mellanox/mlxbf-tmfifo.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Mellanox BlueField SoC TmFifo driver
   4 *
   5 * Copyright (C) 2019 Mellanox Technologies
   6 */
   7
   8#include <linux/acpi.h>
   9#include <linux/bitfield.h>
  10#include <linux/circ_buf.h>
  11#include <linux/efi.h>
  12#include <linux/irq.h>
  13#include <linux/module.h>
  14#include <linux/mutex.h>
  15#include <linux/platform_device.h>
  16#include <linux/types.h>
  17
  18#include <linux/virtio_config.h>
  19#include <linux/virtio_console.h>
  20#include <linux/virtio_ids.h>
  21#include <linux/virtio_net.h>
  22#include <linux/virtio_ring.h>
  23
  24#include "mlxbf-tmfifo-regs.h"
  25
  26/* Vring size. */
  27#define MLXBF_TMFIFO_VRING_SIZE                 SZ_1K
  28
  29/* Console Tx buffer size. */
  30#define MLXBF_TMFIFO_CON_TX_BUF_SIZE            SZ_32K
  31
  32/* Console Tx buffer reserved space. */
  33#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE        8
  34
  35/* House-keeping timer interval. */
  36#define MLXBF_TMFIFO_TIMER_INTERVAL             (HZ / 10)
  37
  38/* Virtual devices sharing the TM FIFO. */
  39#define MLXBF_TMFIFO_VDEV_MAX           (VIRTIO_ID_CONSOLE + 1)
  40
  41/*
  42 * Reserve 1/16 of TmFifo space, so console messages are not starved by
  43 * the networking traffic.
  44 */
  45#define MLXBF_TMFIFO_RESERVE_RATIO              16
  46
  47/* Message with data needs at least two words (for header & data). */
  48#define MLXBF_TMFIFO_DATA_MIN_WORDS             2
  49
  50struct mlxbf_tmfifo;
  51
  52/**
  53 * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
  54 * @va: virtual address of the ring
  55 * @dma: dma address of the ring
  56 * @vq: pointer to the virtio virtqueue
  57 * @desc: current descriptor of the pending packet
  58 * @desc_head: head descriptor of the pending packet
  59 * @cur_len: processed length of the current descriptor
  60 * @rem_len: remaining length of the pending packet
  61 * @pkt_len: total length of the pending packet
  62 * @next_avail: next avail descriptor id
  63 * @num: vring size (number of descriptors)
  64 * @align: vring alignment size
  65 * @index: vring index
  66 * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
  67 * @fifo: pointer to the tmfifo structure
  68 */
  69struct mlxbf_tmfifo_vring {
  70        void *va;
  71        dma_addr_t dma;
  72        struct virtqueue *vq;
  73        struct vring_desc *desc;
  74        struct vring_desc *desc_head;
  75        int cur_len;
  76        int rem_len;
  77        u32 pkt_len;
  78        u16 next_avail;
  79        int num;
  80        int align;
  81        int index;
  82        int vdev_id;
  83        struct mlxbf_tmfifo *fifo;
  84};
  85
  86/* Interrupt types. */
  87enum {
  88        MLXBF_TM_RX_LWM_IRQ,
  89        MLXBF_TM_RX_HWM_IRQ,
  90        MLXBF_TM_TX_LWM_IRQ,
  91        MLXBF_TM_TX_HWM_IRQ,
  92        MLXBF_TM_MAX_IRQ
  93};
  94
  95/* Ring types (Rx & Tx). */
  96enum {
  97        MLXBF_TMFIFO_VRING_RX,
  98        MLXBF_TMFIFO_VRING_TX,
  99        MLXBF_TMFIFO_VRING_MAX
 100};
 101
 102/**
 103 * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
 104 * @vdev: virtio device, in which the vdev.id.device field has the
 105 *        VIRTIO_ID_xxx id to distinguish the virtual device.
 106 * @status: status of the device
 107 * @features: supported features of the device
 108 * @vrings: array of tmfifo vrings of this device
 109 * @config.cons: virtual console config -
 110 *               selected when vdev.id.device is VIRTIO_ID_CONSOLE
 111 * @config.net: virtual network config -
 112 *              selected when vdev.id.device is VIRTIO_ID_NET
 113 * @tx_buf: tx buffer used to buffer data before writing into the FIFO
 114 */
 115struct mlxbf_tmfifo_vdev {
 116        struct virtio_device vdev;
 117        u8 status;
 118        u64 features;
 119        struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
 120        union {
 121                struct virtio_console_config cons;
 122                struct virtio_net_config net;
 123        } config;
 124        struct circ_buf tx_buf;
 125};
 126
 127/**
 128 * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
 129 * @fifo: pointer to the tmfifo structure
 130 * @irq: interrupt number
 131 * @index: index into the interrupt array
 132 */
 133struct mlxbf_tmfifo_irq_info {
 134        struct mlxbf_tmfifo *fifo;
 135        int irq;
 136        int index;
 137};
 138
 139/**
 140 * struct mlxbf_tmfifo - Structure of the TmFifo
 141 * @vdev: array of the virtual devices running over the TmFifo
 142 * @lock: lock to protect the TmFifo access
 143 * @rx_base: mapped register base address for the Rx FIFO
 144 * @tx_base: mapped register base address for the Tx FIFO
 145 * @rx_fifo_size: number of entries of the Rx FIFO
 146 * @tx_fifo_size: number of entries of the Tx FIFO
 147 * @pend_events: pending bits for deferred events
 148 * @irq_info: interrupt information
 149 * @work: work struct for deferred process
 150 * @timer: background timer
 151 * @vring: Tx/Rx ring being processed, indexed by is_rx (0 is Tx, 1 is Rx)
 152 * @spin_lock: Tx/Rx spin lock; entry 0 also protects the console Tx buffer
 153 * @is_ready: ready flag
 154 */
 155struct mlxbf_tmfifo {
 156        struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
 157        struct mutex lock;              /* TmFifo lock */
 158        void __iomem *rx_base;
 159        void __iomem *tx_base;
 160        int rx_fifo_size;
 161        int tx_fifo_size;
 162        unsigned long pend_events;
 163        struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
 164        struct work_struct work;
 165        struct timer_list timer;
 166        struct mlxbf_tmfifo_vring *vring[2];
 167        spinlock_t spin_lock[2];        /* spin lock */
 168        bool is_ready;
 169};
 170
 171/**
 172 * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
 173 * @type: message type
 174 * @len: payload length in network byte order. Messages sent into the FIFO
 175 *       are read by the other side as a byte stream in the same order, so
 176 *       the length is encoded in network byte order for both sides to
 177 *       interpret it consistently.
 178 */
 179struct mlxbf_tmfifo_msg_hdr {
 180        u8 type;
 181        __be16 len;
 182        u8 unused[5];
 183} __packed __aligned(sizeof(u64));
 184
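    /*
     * Note: the header occupies exactly one 64-bit FIFO word (a 1-byte type,
     * a 2-byte big-endian length and 5 reserved bytes), so it is transferred
     * with a single readq()/writeq() on the FIFO data register.
     */
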
 185/*
 186 * Default MAC.
 187 * This MAC address is read from the EFI persistent variable if configured.
 188 * It can also be reconfigured with standard Linux tools.
 189 */
 190static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
 191        0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
 192};
 193
 194/* EFI variable name of the MAC address. */
 195static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
 196
 197/* Maximum L2 header length. */
 198#define MLXBF_TMFIFO_NET_L2_OVERHEAD    36
 199
 200/* Supported virtio-net features. */
 201#define MLXBF_TMFIFO_NET_FEATURES \
 202        (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
 203         BIT_ULL(VIRTIO_NET_F_MAC))
 204
 205#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
 206
 207/* Free vrings of the FIFO device. */
 208static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
 209                                     struct mlxbf_tmfifo_vdev *tm_vdev)
 210{
 211        struct mlxbf_tmfifo_vring *vring;
 212        int i, size;
 213
 214        for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
 215                vring = &tm_vdev->vrings[i];
 216                if (vring->va) {
 217                        size = vring_size(vring->num, vring->align);
 218                        dma_free_coherent(tm_vdev->vdev.dev.parent, size,
 219                                          vring->va, vring->dma);
 220                        vring->va = NULL;
 221                        if (vring->vq) {
 222                                vring_del_virtqueue(vring->vq);
 223                                vring->vq = NULL;
 224                        }
 225                }
 226        }
 227}
 228
 229/* Allocate vrings for the FIFO. */
 230static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
 231                                     struct mlxbf_tmfifo_vdev *tm_vdev)
 232{
 233        struct mlxbf_tmfifo_vring *vring;
 234        struct device *dev;
 235        dma_addr_t dma;
 236        int i, size;
 237        void *va;
 238
 239        for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
 240                vring = &tm_vdev->vrings[i];
 241                vring->fifo = fifo;
 242                vring->num = MLXBF_TMFIFO_VRING_SIZE;
 243                vring->align = SMP_CACHE_BYTES;
 244                vring->index = i;
 245                vring->vdev_id = tm_vdev->vdev.id.device;
 246                dev = &tm_vdev->vdev.dev;
 247
 248                size = vring_size(vring->num, vring->align);
 249                va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
 250                if (!va) {
 251                        mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
 252                        dev_err(dev->parent, "dma_alloc_coherent failed\n");
 253                        return -ENOMEM;
 254                }
 255
 256                vring->va = va;
 257                vring->dma = dma;
 258        }
 259
 260        return 0;
 261}
 262
 263/* Disable interrupts of the FIFO device. */
 264static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
 265{
 266        int i, irq;
 267
 268        for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
 269                irq = fifo->irq_info[i].irq;
 270                fifo->irq_info[i].irq = 0;
 271                disable_irq(irq);
 272        }
 273}
 274
 275/* Interrupt handler. */
 276static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
 277{
 278        struct mlxbf_tmfifo_irq_info *irq_info = arg;
 279
 280        if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
 281                schedule_work(&irq_info->fifo->work);
 282
 283        return IRQ_HANDLED;
 284}
 285
 286/* Get the next packet descriptor from the vring. */
 287static struct vring_desc *
 288mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
 289{
 290        const struct vring *vr = virtqueue_get_vring(vring->vq);
 291        struct virtio_device *vdev = vring->vq->vdev;
 292        unsigned int idx, head;
 293
 294        if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
 295                return NULL;
 296
 297        /* Make sure 'avail->idx' is visible already. */
 298        virtio_rmb(false);
 299
 300        idx = vring->next_avail % vr->num;
 301        head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
 302        if (WARN_ON(head >= vr->num))
 303                return NULL;
 304
 305        vring->next_avail++;
 306
 307        return &vr->desc[head];
 308}
 309
 310/* Release virtio descriptor. */
 311static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
 312                                      struct vring_desc *desc, u32 len)
 313{
 314        const struct vring *vr = virtqueue_get_vring(vring->vq);
 315        struct virtio_device *vdev = vring->vq->vdev;
 316        u16 idx, vr_idx;
 317
 318        vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
 319        idx = vr_idx % vr->num;
 320        vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
 321        vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);
 322
 323        /*
 324         * Virtio could poll and check the 'idx' to decide whether the desc is
 325         * done or not. Add a memory barrier here to make sure the update above
 326         * completes before updating the idx.
 327         */
 328        virtio_mb(false);
 329        vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
 330}
 331
 332/* Get the total length of the descriptor chain. */
 333static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
 334                                    struct vring_desc *desc)
 335{
 336        const struct vring *vr = virtqueue_get_vring(vring->vq);
 337        struct virtio_device *vdev = vring->vq->vdev;
 338        u32 len = 0, idx;
 339
 340        while (desc) {
 341                len += virtio32_to_cpu(vdev, desc->len);
 342                if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
 343                        break;
 344                idx = virtio16_to_cpu(vdev, desc->next);
 345                desc = &vr->desc[idx];
 346        }
 347
 348        return len;
 349}
 350
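    /*
     * Release the pending packet back to the used ring, or the next available
     * descriptor chain if no packet is pending.
     */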
 351static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
 352{
 353        struct vring_desc *desc_head;
 354        u32 len = 0;
 355
 356        if (vring->desc_head) {
 357                desc_head = vring->desc_head;
 358                len = vring->pkt_len;
 359        } else {
 360                desc_head = mlxbf_tmfifo_get_next_desc(vring);
 361                len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
 362        }
 363
 364        if (desc_head)
 365                mlxbf_tmfifo_release_desc(vring, desc_head, len);
 366
 367        vring->pkt_len = 0;
 368        vring->desc = NULL;
 369        vring->desc_head = NULL;
 370}
 371
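    /* Zero-initialize the virtio_net header at the start of an Rx descriptor. */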
 372static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
 373                                       struct vring_desc *desc, bool is_rx)
 374{
 375        struct virtio_device *vdev = vring->vq->vdev;
 376        struct virtio_net_hdr *net_hdr;
 377
 378        net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
 379        memset(net_hdr, 0, sizeof(*net_hdr));
 380}
 381
 382/* Get and initialize the next packet. */
 383static struct vring_desc *
 384mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 385{
 386        struct vring_desc *desc;
 387
 388        desc = mlxbf_tmfifo_get_next_desc(vring);
 389        if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
 390                mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
 391
 392        vring->desc_head = desc;
 393        vring->desc = desc;
 394
 395        return desc;
 396}
 397
 398/* House-keeping timer. */
 399static void mlxbf_tmfifo_timer(struct timer_list *t)
 400{
 401        struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
 402        int rx, tx;
 403
 404        rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
 405        tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
 406
 407        if (rx || tx)
 408                schedule_work(&fifo->work);
 409
 410        mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
 411}
 412
 413/* Copy one console packet into the output buffer. */
 414static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
 415                                            struct mlxbf_tmfifo_vring *vring,
 416                                            struct vring_desc *desc)
 417{
 418        const struct vring *vr = virtqueue_get_vring(vring->vq);
 419        struct virtio_device *vdev = &cons->vdev;
 420        u32 len, idx, seg;
 421        void *addr;
 422
 423        while (desc) {
 424                addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
 425                len = virtio32_to_cpu(vdev, desc->len);
 426
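                    /* Copy into the circular Tx buffer, wrapping at the end if needed. */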
 427                seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
 428                                        MLXBF_TMFIFO_CON_TX_BUF_SIZE);
 429                if (len <= seg) {
 430                        memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
 431                } else {
 432                        memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
 433                        addr += seg;
 434                        memcpy(cons->tx_buf.buf, addr, len - seg);
 435                }
 436                cons->tx_buf.head = (cons->tx_buf.head + len) %
 437                        MLXBF_TMFIFO_CON_TX_BUF_SIZE;
 438
 439                if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
 440                        break;
 441                idx = virtio16_to_cpu(vdev, desc->next);
 442                desc = &vr->desc[idx];
 443        }
 444}
 445
 446/* Copy console data into the output buffer. */
 447static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
 448                                        struct mlxbf_tmfifo_vring *vring)
 449{
 450        struct vring_desc *desc;
 451        u32 len, avail;
 452
 453        desc = mlxbf_tmfifo_get_next_desc(vring);
 454        while (desc) {
 455                /* Drop the packet if there is not enough buffer space. */
 456                len = mlxbf_tmfifo_get_pkt_len(vring, desc);
 457                avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
 458                                   MLXBF_TMFIFO_CON_TX_BUF_SIZE);
 459                if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
 460                        mlxbf_tmfifo_release_desc(vring, desc, len);
 461                        break;
 462                }
 463
 464                mlxbf_tmfifo_console_output_one(cons, vring, desc);
 465                mlxbf_tmfifo_release_desc(vring, desc, len);
 466                desc = mlxbf_tmfifo_get_next_desc(vring);
 467        }
 468}
 469
 470/* Get the number of available words in Rx FIFO for receiving. */
 471static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
 472{
 473        u64 sts;
 474
 475        sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS);
 476        return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
 477}
 478
 479/* Get the number of available words in the TmFifo for sending. */
 480static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
 481{
 482        int tx_reserve;
 483        u32 count;
 484        u64 sts;
 485
 486        /* Reserve some room in FIFO for console messages. */
 487        if (vdev_id == VIRTIO_ID_NET)
 488                tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
 489        else
 490                tx_reserve = 1;
 491
 492        sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
 493        count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
 494        return fifo->tx_fifo_size - tx_reserve - count;
 495}
 496
 497/* Console Tx (move data from the output buffer into the TmFifo). */
 498static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
 499{
 500        struct mlxbf_tmfifo_msg_hdr hdr;
 501        struct mlxbf_tmfifo_vdev *cons;
 502        unsigned long flags;
 503        int size, seg;
 504        void *addr;
 505        u64 data;
 506
 507        /* Return if not enough space available. */
 508        if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
 509                return;
 510
 511        cons = fifo->vdev[VIRTIO_ID_CONSOLE];
 512        if (!cons || !cons->tx_buf.buf)
 513                return;
 514
 515        /* Return if no data to send. */
 516        size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
 517                        MLXBF_TMFIFO_CON_TX_BUF_SIZE);
 518        if (size == 0)
 519                return;
 520
 521        /* Adjust the size to available space. */
 522        if (size + sizeof(hdr) > avail * sizeof(u64))
 523                size = avail * sizeof(u64) - sizeof(hdr);
 524
 525        /* Write header. */
 526        hdr.type = VIRTIO_ID_CONSOLE;
 527        hdr.len = htons(size);
 528        writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
 529
 530        /* Use spin-lock to protect the 'cons->tx_buf'. */
 531        spin_lock_irqsave(&fifo->spin_lock[0], flags);
 532
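            /* Drain the Tx buffer into the FIFO one 64-bit word at a time. */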
 533        while (size > 0) {
 534                addr = cons->tx_buf.buf + cons->tx_buf.tail;
 535
 536                seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
 537                                      MLXBF_TMFIFO_CON_TX_BUF_SIZE);
 538                if (seg >= sizeof(u64)) {
 539                        memcpy(&data, addr, sizeof(u64));
 540                } else {
 541                        memcpy(&data, addr, seg);
 542                        memcpy((u8 *)&data + seg, cons->tx_buf.buf,
 543                               sizeof(u64) - seg);
 544                }
 545                writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
 546
 547                if (size >= sizeof(u64)) {
 548                        cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
 549                                MLXBF_TMFIFO_CON_TX_BUF_SIZE;
 550                        size -= sizeof(u64);
 551                } else {
 552                        cons->tx_buf.tail = (cons->tx_buf.tail + size) %
 553                                MLXBF_TMFIFO_CON_TX_BUF_SIZE;
 554                        size = 0;
 555                }
 556        }
 557
 558        spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
 559}
 560
 561/* Rx/Tx one word in the descriptor buffer. */
 562static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
 563                                   struct vring_desc *desc,
 564                                   bool is_rx, int len)
 565{
 566        struct virtio_device *vdev = vring->vq->vdev;
 567        struct mlxbf_tmfifo *fifo = vring->fifo;
 568        void *addr;
 569        u64 data;
 570
 571        /* Get the buffer address of this desc. */
 572        addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
 573
 574        /* Read a word from FIFO for Rx. */
 575        if (is_rx)
 576                data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
 577
 578        if (vring->cur_len + sizeof(u64) <= len) {
 579                /* The whole word. */
 580                if (is_rx)
 581                        memcpy(addr + vring->cur_len, &data, sizeof(u64));
 582                else
 583                        memcpy(&data, addr + vring->cur_len, sizeof(u64));
 584                vring->cur_len += sizeof(u64);
 585        } else {
 586                /* Leftover bytes. */
 587                if (is_rx)
 588                        memcpy(addr + vring->cur_len, &data,
 589                               len - vring->cur_len);
 590                else
 591                        memcpy(&data, addr + vring->cur_len,
 592                               len - vring->cur_len);
 593                vring->cur_len = len;
 594        }
 595
 596        /* Write the word into FIFO for Tx. */
 597        if (!is_rx)
 598                writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
 599}
 600
 601/*
 602 * Rx/Tx packet header.
 603 *
 604 * In the Rx case, the packet might belong to a different vring since the
 605 * TmFifo is shared by different services. In that case, the 'vring_change'
 606 * flag is set.
 607 */
 608static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
 609                                     struct vring_desc *desc,
 610                                     bool is_rx, bool *vring_change)
 611{
 612        struct mlxbf_tmfifo *fifo = vring->fifo;
 613        struct virtio_net_config *config;
 614        struct mlxbf_tmfifo_msg_hdr hdr;
 615        int vdev_id, hdr_len;
 616
 617        /* Read/Write packet header. */
 618        if (is_rx) {
 619                /* Drain one word from the FIFO. */
 620                *(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
 621
 622                /* Skip zero-length (keepalive) packets. */
 623                if (hdr.len == 0)
 624                        return;
 625
 626                /* Check packet type. */
 627                if (hdr.type == VIRTIO_ID_NET) {
 628                        vdev_id = VIRTIO_ID_NET;
 629                        hdr_len = sizeof(struct virtio_net_hdr);
 630                        config = &fifo->vdev[vdev_id]->config.net;
 631                        /* A legacy-only interface for now. */
 632                        if (ntohs(hdr.len) >
 633                            __virtio16_to_cpu(virtio_legacy_is_little_endian(),
 634                                              config->mtu) +
 635                            MLXBF_TMFIFO_NET_L2_OVERHEAD)
 636                                return;
 637                } else {
 638                        vdev_id = VIRTIO_ID_CONSOLE;
 639                        hdr_len = 0;
 640                }
 641
 642                /*
 643                 * Check whether the new packet still belongs to this vring.
 644                 * If not, update the pkt_len of the new vring.
 645                 */
 646                if (vdev_id != vring->vdev_id) {
 647                        struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];
 648
 649                        if (!tm_dev2)
 650                                return;
 651                        vring->desc = desc;
 652                        vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
 653                        *vring_change = true;
 654                }
 655                vring->pkt_len = ntohs(hdr.len) + hdr_len;
 656        } else {
 657                /* Network virtio has an extra header. */
 658                hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
 659                           sizeof(struct virtio_net_hdr) : 0;
 660                vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
 661                hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
 662                            VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
 663                hdr.len = htons(vring->pkt_len - hdr_len);
 664                writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
 665        }
 666
 667        vring->cur_len = hdr_len;
 668        vring->rem_len = vring->pkt_len;
 669        fifo->vring[is_rx] = vring;
 670}
 671
 672/*
 673 * Rx/Tx one descriptor.
 674 *
 675 * Return true if more data is available.
 676 */
 677static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
 678                                       bool is_rx, int *avail)
 679{
 680        const struct vring *vr = virtqueue_get_vring(vring->vq);
 681        struct mlxbf_tmfifo *fifo = vring->fifo;
 682        struct virtio_device *vdev;
 683        bool vring_change = false;
 684        struct vring_desc *desc;
 685        unsigned long flags;
 686        u32 len, idx;
 687
 688        vdev = &fifo->vdev[vring->vdev_id]->vdev;
 689
 690        /* Get the descriptor of the next packet. */
 691        if (!vring->desc) {
 692                desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
 693                if (!desc)
 694                        return false;
 695        } else {
 696                desc = vring->desc;
 697        }
 698
 699        /* Beginning of a packet. Start to Rx/Tx packet header. */
 700        if (vring->pkt_len == 0) {
 701                mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
 702                (*avail)--;
 703
 704                /* Return if new packet is for another ring. */
 705                if (vring_change)
 706                        return false;
 707                goto mlxbf_tmfifo_desc_done;
 708        }
 709
 710        /* Get the length of this desc. */
 711        len = virtio32_to_cpu(vdev, desc->len);
 712        if (len > vring->rem_len)
 713                len = vring->rem_len;
 714
 715        /* Rx/Tx one word (8 bytes) if not done. */
 716        if (vring->cur_len < len) {
 717                mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
 718                (*avail)--;
 719        }
 720
 721        /* Check again whether it's done. */
 722        if (vring->cur_len == len) {
 723                vring->cur_len = 0;
 724                vring->rem_len -= len;
 725
 726                /* Get the next desc on the chain. */
 727                if (vring->rem_len > 0 &&
 728                    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
 729                        idx = virtio16_to_cpu(vdev, desc->next);
 730                        desc = &vr->desc[idx];
 731                        goto mlxbf_tmfifo_desc_done;
 732                }
 733
 734                /* Done and release the pending packet. */
 735                mlxbf_tmfifo_release_pending_pkt(vring);
 736                desc = NULL;
 737                fifo->vring[is_rx] = NULL;
 738
 739                /*
 740                 * Make sure the load/store are in order before
 741                 * returning back to virtio.
 742                 */
 743                virtio_mb(false);
 744
 745                /* Notify upper layer that packet is done. */
 746                spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
 747                vring_interrupt(0, vring->vq);
 748                spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
 749        }
 750
 751mlxbf_tmfifo_desc_done:
 752        /* Save the current desc. */
 753        vring->desc = desc;
 754
 755        return true;
 756}
 757
 758/* Rx & Tx processing of a queue. */
 759static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 760{
 761        int avail = 0, devid = vring->vdev_id;
 762        struct mlxbf_tmfifo *fifo;
 763        bool more;
 764
 765        fifo = vring->fifo;
 766
 767        /* Return if vdev is not ready. */
 768        if (!fifo->vdev[devid])
 769                return;
 770
 771        /* Return if another vring is running. */
 772        if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
 773                return;
 774
 775        /* Only handle console and network for now. */
 776        if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
 777                return;
 778
 779        do {
 780                /* Get available FIFO space. */
 781                if (avail == 0) {
 782                        if (is_rx)
 783                                avail = mlxbf_tmfifo_get_rx_avail(fifo);
 784                        else
 785                                avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
 786                        if (avail <= 0)
 787                                break;
 788                }
 789
 790                /* Console output always comes from the Tx buffer. */
 791                if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
 792                        mlxbf_tmfifo_console_tx(fifo, avail);
 793                        break;
 794                }
 795
 796                /* Handle one descriptor. */
 797                more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
 798        } while (more);
 799}
 800
 801/* Handle Rx or Tx queues. */
 802static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
 803                                   int irq_id, bool is_rx)
 804{
 805        struct mlxbf_tmfifo_vdev *tm_vdev;
 806        struct mlxbf_tmfifo_vring *vring;
 807        int i;
 808
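            /* Return if the event is not pending or the IRQ has been disabled. */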
 809        if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
 810            !fifo->irq_info[irq_id].irq)
 811                return;
 812
 813        for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
 814                tm_vdev = fifo->vdev[i];
 815                if (tm_vdev) {
 816                        vring = &tm_vdev->vrings[queue_id];
 817                        if (vring->vq)
 818                                mlxbf_tmfifo_rxtx(vring, is_rx);
 819                }
 820        }
 821}
 822
 823/* Work handler for Rx and Tx case. */
 824static void mlxbf_tmfifo_work_handler(struct work_struct *work)
 825{
 826        struct mlxbf_tmfifo *fifo;
 827
 828        fifo = container_of(work, struct mlxbf_tmfifo, work);
 829        if (!fifo->is_ready)
 830                return;
 831
 832        mutex_lock(&fifo->lock);
 833
 834        /* Tx (Send data to the TmFifo). */
 835        mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
 836                               MLXBF_TM_TX_LWM_IRQ, false);
 837
 838        /* Rx (Receive data from the TmFifo). */
 839        mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
 840                               MLXBF_TM_RX_HWM_IRQ, true);
 841
 842        mutex_unlock(&fifo->lock);
 843}
 844
 845/* The notify function is called when new buffers are posted. */
 846static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
 847{
 848        struct mlxbf_tmfifo_vring *vring = vq->priv;
 849        struct mlxbf_tmfifo_vdev *tm_vdev;
 850        struct mlxbf_tmfifo *fifo;
 851        unsigned long flags;
 852
 853        fifo = vring->fifo;
 854
 855        /*
 856         * Virtio maintains vrings in pairs: even-numbered rings for Rx
 857         * and odd-numbered rings for Tx.
 858         */
 859        if (vring->index & BIT(0)) {
 860                /*
 861                 * The console could make a blocking call with interrupts
 862                 * disabled. In that case, the vring needs to be served right
 863                 * away. For other cases, just set the TX LWM bit to start Tx
 864                 * in the worker handler.
 865                 */
 866                if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
 867                        spin_lock_irqsave(&fifo->spin_lock[0], flags);
 868                        tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
 869                        mlxbf_tmfifo_console_output(tm_vdev, vring);
 870                        spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
 871                } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
 872                                            &fifo->pend_events)) {
 873                        return true;
 874                }
 875        } else {
 876                if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
 877                        return true;
 878        }
 879
 880        schedule_work(&fifo->work);
 881
 882        return true;
 883}
 884
 885/* Get the array of feature bits for this device. */
 886static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
 887{
 888        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
 889
 890        return tm_vdev->features;
 891}
 892
 893/* Confirm device features to use. */
 894static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
 895{
 896        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
 897
 898        tm_vdev->features = vdev->features;
 899
 900        return 0;
 901}
 902
 903/* Free virtqueues found by find_vqs(). */
 904static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
 905{
 906        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
 907        struct mlxbf_tmfifo_vring *vring;
 908        struct virtqueue *vq;
 909        int i;
 910
 911        for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
 912                vring = &tm_vdev->vrings[i];
 913
 914                /* Release the pending packet. */
 915                if (vring->desc)
 916                        mlxbf_tmfifo_release_pending_pkt(vring);
 917                vq = vring->vq;
 918                if (vq) {
 919                        vring->vq = NULL;
 920                        vring_del_virtqueue(vq);
 921                }
 922        }
 923}
 924
 925/* Create and initialize the virtual queues. */
 926static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
 927                                        unsigned int nvqs,
 928                                        struct virtqueue *vqs[],
 929                                        vq_callback_t *callbacks[],
 930                                        const char * const names[],
 931                                        const bool *ctx,
 932                                        struct irq_affinity *desc)
 933{
 934        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
 935        struct mlxbf_tmfifo_vring *vring;
 936        struct virtqueue *vq;
 937        int i, ret, size;
 938
 939        if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
 940                return -EINVAL;
 941
 942        for (i = 0; i < nvqs; ++i) {
 943                if (!names[i]) {
 944                        ret = -EINVAL;
 945                        goto error;
 946                }
 947                vring = &tm_vdev->vrings[i];
 948
 949                /* zero vring */
 950                size = vring_size(vring->num, vring->align);
 951                memset(vring->va, 0, size);
 952                vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
 953                                         false, false, vring->va,
 954                                         mlxbf_tmfifo_virtio_notify,
 955                                         callbacks[i], names[i]);
 956                if (!vq) {
 957                        dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
 958                        ret = -ENOMEM;
 959                        goto error;
 960                }
 961
 962                vqs[i] = vq;
 963                vring->vq = vq;
 964                vq->priv = vring;
 965        }
 966
 967        return 0;
 968
 969error:
 970        mlxbf_tmfifo_virtio_del_vqs(vdev);
 971        return ret;
 972}
 973
 974/* Read the status byte. */
 975static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
 976{
 977        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
 978
 979        return tm_vdev->status;
 980}
 981
 982/* Write the status byte. */
 983static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
 984                                           u8 status)
 985{
 986        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
 987
 988        tm_vdev->status = status;
 989}
 990
 991/* Reset the device. Not much here for now. */
 992static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
 993{
 994        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
 995
 996        tm_vdev->status = 0;
 997}
 998
 999/* Read the value of a configuration field. */
1000static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
1001                                    unsigned int offset,
1002                                    void *buf,
1003                                    unsigned int len)
1004{
1005        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1006
1007        if ((u64)offset + len > sizeof(tm_vdev->config))
1008                return;
1009
1010        memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
1011}
1012
1013/* Write the value of a configuration field. */
1014static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
1015                                    unsigned int offset,
1016                                    const void *buf,
1017                                    unsigned int len)
1018{
1019        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1020
1021        if ((u64)offset + len > sizeof(tm_vdev->config))
1022                return;
1023
1024        memcpy((u8 *)&tm_vdev->config + offset, buf, len);
1025}
1026
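    /* Device release callback: free the containing tmfifo vdev structure. */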
1027static void tmfifo_virtio_dev_release(struct device *device)
1028{
1029        struct virtio_device *vdev =
1030                        container_of(device, struct virtio_device, dev);
1031        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1032
1033        kfree(tm_vdev);
1034}
1035
1036/* Virtio config operations. */
1037static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
1038        .get_features = mlxbf_tmfifo_virtio_get_features,
1039        .finalize_features = mlxbf_tmfifo_virtio_finalize_features,
1040        .find_vqs = mlxbf_tmfifo_virtio_find_vqs,
1041        .del_vqs = mlxbf_tmfifo_virtio_del_vqs,
1042        .reset = mlxbf_tmfifo_virtio_reset,
1043        .set_status = mlxbf_tmfifo_virtio_set_status,
1044        .get_status = mlxbf_tmfifo_virtio_get_status,
1045        .get = mlxbf_tmfifo_virtio_get,
1046        .set = mlxbf_tmfifo_virtio_set,
1047};
1048
1049/* Create vdev for the FIFO. */
1050static int mlxbf_tmfifo_create_vdev(struct device *dev,
1051                                    struct mlxbf_tmfifo *fifo,
1052                                    int vdev_id, u64 features,
1053                                    void *config, u32 size)
1054{
1055        struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
1056        int ret;
1057
1058        mutex_lock(&fifo->lock);
1059
1060        tm_vdev = fifo->vdev[vdev_id];
1061        if (tm_vdev) {
1062                dev_err(dev, "vdev %d already exists\n", vdev_id);
1063                ret = -EEXIST;
1064                goto fail;
1065        }
1066
1067        tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
1068        if (!tm_vdev) {
1069                ret = -ENOMEM;
1070                goto fail;
1071        }
1072
1073        tm_vdev->vdev.id.device = vdev_id;
1074        tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
1075        tm_vdev->vdev.dev.parent = dev;
1076        tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
1077        tm_vdev->features = features;
1078        if (config)
1079                memcpy(&tm_vdev->config, config, size);
1080
1081        if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
1082                dev_err(dev, "unable to allocate vring\n");
1083                ret = -ENOMEM;
1084                goto vdev_fail;
1085        }
1086
1087        /* Allocate an output buffer for the console device. */
1088        if (vdev_id == VIRTIO_ID_CONSOLE)
1089                tm_vdev->tx_buf.buf = devm_kmalloc(dev,
1090                                                   MLXBF_TMFIFO_CON_TX_BUF_SIZE,
1091                                                   GFP_KERNEL);
1092        fifo->vdev[vdev_id] = tm_vdev;
1093
1094        /* Register the virtio device. */
1095        ret = register_virtio_device(&tm_vdev->vdev);
1096        reg_dev = tm_vdev;
1097        if (ret) {
1098                dev_err(dev, "register_virtio_device failed\n");
1099                goto vdev_fail;
1100        }
1101
1102        mutex_unlock(&fifo->lock);
1103        return 0;
1104
1105vdev_fail:
1106        mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
1107        fifo->vdev[vdev_id] = NULL;
1108        if (reg_dev)
1109                put_device(&tm_vdev->vdev.dev);
1110        else
1111                kfree(tm_vdev);
1112fail:
1113        mutex_unlock(&fifo->lock);
1114        return ret;
1115}
1116
1117/* Delete vdev for the FIFO. */
1118static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
1119{
1120        struct mlxbf_tmfifo_vdev *tm_vdev;
1121
1122        mutex_lock(&fifo->lock);
1123
1124        /* Unregister vdev. */
1125        tm_vdev = fifo->vdev[vdev_id];
1126        if (tm_vdev) {
1127                unregister_virtio_device(&tm_vdev->vdev);
1128                mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
1129                fifo->vdev[vdev_id] = NULL;
1130        }
1131
1132        mutex_unlock(&fifo->lock);
1133
1134        return 0;
1135}
1136
1137/* Read the configured network MAC address from the EFI variable. */
1138static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
1139{
1140        efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
1141        unsigned long size = ETH_ALEN;
1142        u8 buf[ETH_ALEN];
1143        efi_status_t rc;
1144
1145        rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
1146        if (rc == EFI_SUCCESS && size == ETH_ALEN)
1147                ether_addr_copy(mac, buf);
1148        else
1149                ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
1150}
1151
1152/* Set the TmFifo thresholds that are used to trigger interrupts. */
1153static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
1154{
1155        u64 ctl;
1156
1157        /* Get Tx FIFO size and set the low/high watermark. */
1158        ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
1159        fifo->tx_fifo_size =
1160                FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
1161        ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
1162                FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
1163                           fifo->tx_fifo_size / 2);
1164        ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
1165                FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
1166                           fifo->tx_fifo_size - 1);
1167        writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
1168
1169        /* Get Rx FIFO size and set the low/high watermark. */
1170        ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
1171        fifo->rx_fifo_size =
1172                FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
1173        ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
1174                FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
1175        ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
1176                FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
1177        writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
1178}
1179
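    /* Stop the timer, disable interrupts, cancel pending work and delete all vdevs. */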
1180static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
1181{
1182        int i;
1183
1184        fifo->is_ready = false;
1185        del_timer_sync(&fifo->timer);
1186        mlxbf_tmfifo_disable_irqs(fifo);
1187        cancel_work_sync(&fifo->work);
1188        for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
1189                mlxbf_tmfifo_delete_vdev(fifo, i);
1190}
1191
1192/* Probe the TmFifo. */
1193static int mlxbf_tmfifo_probe(struct platform_device *pdev)
1194{
1195        struct virtio_net_config net_config;
1196        struct device *dev = &pdev->dev;
1197        struct mlxbf_tmfifo *fifo;
1198        int i, rc;
1199
1200        fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
1201        if (!fifo)
1202                return -ENOMEM;
1203
1204        spin_lock_init(&fifo->spin_lock[0]);
1205        spin_lock_init(&fifo->spin_lock[1]);
1206        INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
1207        mutex_init(&fifo->lock);
1208
1209        /* Get the resource of the Rx FIFO. */
1210        fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
1211        if (IS_ERR(fifo->rx_base))
1212                return PTR_ERR(fifo->rx_base);
1213
1214        /* Get the resource of the Tx FIFO. */
1215        fifo->tx_base = devm_platform_ioremap_resource(pdev, 1);
1216        if (IS_ERR(fifo->tx_base))
1217                return PTR_ERR(fifo->tx_base);
1218
1219        platform_set_drvdata(pdev, fifo);
1220
1221        timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
1222
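            /* Request the FIFO interrupts and set up the per-IRQ info. */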
1223        for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
1224                fifo->irq_info[i].index = i;
1225                fifo->irq_info[i].fifo = fifo;
1226                fifo->irq_info[i].irq = platform_get_irq(pdev, i);
1227                rc = devm_request_irq(dev, fifo->irq_info[i].irq,
1228                                      mlxbf_tmfifo_irq_handler, 0,
1229                                      "tmfifo", &fifo->irq_info[i]);
1230                if (rc) {
1231                        dev_err(dev, "devm_request_irq failed\n");
1232                        fifo->irq_info[i].irq = 0;
1233                        return rc;
1234                }
1235        }
1236
1237        mlxbf_tmfifo_set_threshold(fifo);
1238
1239        /* Create the console vdev. */
1240        rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
1241        if (rc)
1242                goto fail;
1243
1244        /* Create the network vdev. */
1245        memset(&net_config, 0, sizeof(net_config));
1246
1247        /* A legacy-only interface for now. */
1248        net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1249                                           ETH_DATA_LEN);
1250        net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1251                                              VIRTIO_NET_S_LINK_UP);
1252        mlxbf_tmfifo_get_cfg_mac(net_config.mac);
1253        rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
1254                                      MLXBF_TMFIFO_NET_FEATURES, &net_config,
1255                                      sizeof(net_config));
1256        if (rc)
1257                goto fail;
1258
1259        mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
1260
1261        fifo->is_ready = true;
1262        return 0;
1263
1264fail:
1265        mlxbf_tmfifo_cleanup(fifo);
1266        return rc;
1267}
1268
1269/* Device remove function. */
1270static int mlxbf_tmfifo_remove(struct platform_device *pdev)
1271{
1272        struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);
1273
1274        mlxbf_tmfifo_cleanup(fifo);
1275
1276        return 0;
1277}
1278
1279static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
1280        { "MLNXBF01", 0 },
1281        {}
1282};
1283MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);
1284
1285static struct platform_driver mlxbf_tmfifo_driver = {
1286        .probe = mlxbf_tmfifo_probe,
1287        .remove = mlxbf_tmfifo_remove,
1288        .driver = {
1289                .name = "bf-tmfifo",
1290                .acpi_match_table = mlxbf_tmfifo_acpi_match,
1291        },
1292};
1293
1294module_platform_driver(mlxbf_tmfifo_driver);
1295
1296MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
1297MODULE_LICENSE("GPL v2");
1298MODULE_AUTHOR("Mellanox Technologies");
1299