/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

static const char mlxsw_pci_driver_name[] = "mlxsw_pci";

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
        iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
        ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
        MLXSW_PCI_QUEUE_TYPE_SDQ,
        MLXSW_PCI_QUEUE_TYPE_RDQ,
        MLXSW_PCI_QUEUE_TYPE_CQ,
        MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT      4

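/* Doorbell register offsets, indexed by queue type. Only CQs and EQs have
 * an "arm" doorbell, hence the zero placeholders for SDQ and RDQ in the
 * second table.
 */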
static const u16 mlxsw_pci_doorbell_type_offset[] = {
        MLXSW_PCI_DOORBELL_SDQ_OFFSET,  /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
        MLXSW_PCI_DOORBELL_RDQ_OFFSET,  /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
        MLXSW_PCI_DOORBELL_CQ_OFFSET,   /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
        MLXSW_PCI_DOORBELL_EQ_OFFSET,   /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
        0, /* unused */
        0, /* unused */
        MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
        MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
        char *buf;
        dma_addr_t mapaddr;
        size_t size;
};

struct mlxsw_pci_queue_elem_info {
        char *elem; /* pointer to actual dma mapped element mem chunk */
        union {
                struct {
                        struct sk_buff *skb;
                } sdq;
                struct {
                        struct sk_buff *skb;
                } rdq;
        } u;
};

struct mlxsw_pci_queue {
        spinlock_t lock; /* for queue accesses */
        struct mlxsw_pci_mem_item mem_item;
        struct mlxsw_pci_queue_elem_info *elem_info;
        u16 producer_counter;
        u16 consumer_counter;
        u16 count; /* number of elements in queue */
        u8 num; /* queue number */
        u8 elem_size; /* size of one element */
        enum mlxsw_pci_queue_type type;
        struct tasklet_struct tasklet; /* queue processing tasklet */
        struct mlxsw_pci *pci;
        union {
                struct {
                        u32 comp_sdq_count;
                        u32 comp_rdq_count;
                } cq;
                struct {
                        u32 ev_cmd_count;
                        u32 ev_comp_count;
                        u32 ev_other_count;
                } eq;
        } u;
};

struct mlxsw_pci_queue_type_group {
        struct mlxsw_pci_queue *q;
        u8 count; /* number of queues in group */
};

struct mlxsw_pci {
        struct pci_dev *pdev;
        u8 __iomem *hw_addr;
        struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
        u32 doorbell_offset;
        struct mlxsw_core *core;
        struct {
                struct mlxsw_pci_mem_item *items;
                unsigned int count;
        } fw_area;
        struct {
                struct mlxsw_pci_mem_item out_mbox;
                struct mlxsw_pci_mem_item in_mbox;
                struct mutex lock; /* Lock access to command registers */
                bool nopoll;
                wait_queue_head_t wait;
                bool wait_done;
                struct {
                        u8 status;
                        u64 out_param;
                } comp;
        } cmd;
        struct mlxsw_bus_info bus_info;
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
        tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
                                        size_t elem_size, int elem_index)
{
        return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
        return &q->elem_info[elem_index];
}

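/* Returns NULL when the ring is full. The counters are u16 and the element
 * count is a power of two, so (u16)(producer - consumer) == count exactly
 * when every element is in flight, e.g. producer == 513, consumer == 1,
 * count == 512.
 */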
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
        int index = q->producer_counter & (q->count - 1);

        if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
                return NULL;
        return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
        int index = q->consumer_counter & (q->count - 1);

        return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
        return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

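/* Ownership convention (inferred from this file, not from documentation):
 * software tracks which "lap" of the ring its consumer counter is on via
 * (consumer_counter & count), and the device flips the owner bit it writes
 * on every lap. An element is still hardware-owned while its owner bit
 * differs from the value expected for the current lap. For example, with
 * count == 512 and consumer_counter == 514 (second lap), software may
 * consume elements whose owner bit reads 1.
 */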
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
        return owner_bit != !!(q->consumer_counter & q->count);
}

static char *
mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
                            u32 (*get_elem_owner_func)(const char *))
{
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *elem;
        bool owner_bit;

        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
        elem = elem_info->elem;
        owner_bit = get_elem_owner_func(elem);
        if (mlxsw_pci_elem_hw_owned(q, owner_bit))
                return NULL;
        q->consumer_counter++;
        rmb(); /* make sure we read owned bit before the rest of elem */
        return elem;
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
                               enum mlxsw_pci_queue_type q_type)
{
        return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
                                  enum mlxsw_pci_queue_type q_type)
{
        struct mlxsw_pci_queue_type_group *queue_group;

        queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
        return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
        return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
        return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
                      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
        return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
                                                 u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci,
                                     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
                                                 u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci,
                                     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
                                                u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
                                                u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
                                           struct mlxsw_pci_queue *q,
                                           u16 val)
{
        mlxsw_pci_write32(mlxsw_pci,
                          DOORBELL(mlxsw_pci->doorbell_offset,
                                   mlxsw_pci_doorbell_type_offset[q->type],
                                   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
                                               struct mlxsw_pci_queue *q,
                                               u16 val)
{
        mlxsw_pci_write32(mlxsw_pci,
                          DOORBELL(mlxsw_pci->doorbell_offset,
                                   mlxsw_pci_doorbell_arm_type_offset[q->type],
                                   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
                                                   struct mlxsw_pci_queue *q)
{
        wmb(); /* ensure all writes are done before we ring a bell */
        __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

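/* Note that the consumer doorbell is rung with consumer_counter + count
 * rather than the raw counter. This keeps the reported value a full lap
 * ahead of the software read pointer, which, by the owner-bit convention
 * above, appears to be how the device distinguishes a fully released ring
 * from an empty one (an inference from this code, not from documentation).
 */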
static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
                                                   struct mlxsw_pci_queue *q)
{
        wmb(); /* ensure all writes are done before we ring a bell */
        __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
                                       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
                                           struct mlxsw_pci_queue *q)
{
        wmb(); /* ensure all writes are done before we ring a bell */
        __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
                                             int page_index)
{
        return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                              struct mlxsw_pci_queue *q)
{
        int i;
        int err;

        q->producer_counter = 0;
        q->consumer_counter = 0;

        /* Set CQ of the same number as this SDQ. */
        mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
        mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
        mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
        }

        err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
        return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
                               struct mlxsw_pci_queue *q)
{
        mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
                                  int index, char *frag_data, size_t frag_len,
                                  int direction)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        dma_addr_t mapaddr;

        mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
        if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
                dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
        mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
        return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
                                     int index, int direction)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
        dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

        if (!frag_len)
                return;
        pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
                                   struct mlxsw_pci_queue_elem_info *elem_info)
{
        size_t buf_len = MLXSW_PORT_MAX_MTU;
        char *wqe = elem_info->elem;
        struct sk_buff *skb;
        int err;

        elem_info->u.rdq.skb = NULL;
        skb = netdev_alloc_skb_ip_align(NULL, buf_len);
        if (!skb)
                return -ENOMEM;

        /* Assume that wqe was previously zeroed. */

        err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
                                     buf_len, DMA_FROM_DEVICE);
        if (err)
                goto err_frag_map;

        elem_info->u.rdq.skb = skb;
        return 0;

err_frag_map:
        dev_kfree_skb_any(skb);
        return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
                                   struct mlxsw_pci_queue_elem_info *elem_info)
{
        struct sk_buff *skb;
        char *wqe;

        skb = elem_info->u.rdq.skb;
        wqe = elem_info->elem;

        mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                              struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_queue_elem_info *elem_info;
        u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
        int i;
        int err;

        q->producer_counter = 0;
        q->consumer_counter = 0;

        /* Set the CQ number for this RDQ. RDQs use the CQ numbers starting
         * above the SDQ count, as the lower numbers are assigned to SDQs.
         */
        mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
        mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
        }

        err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;

        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

        for (i = 0; i < q->count; i++) {
                elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
                BUG_ON(!elem_info);
                err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
                if (err)
                        goto rollback;
                /* Everything is set up, ring doorbell to pass elem to HW */
                q->producer_counter++;
                mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
        }

        return 0;

rollback:
        for (i--; i >= 0; i--) {
                elem_info = mlxsw_pci_queue_elem_info_get(q, i);
                mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
        }
        mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

        return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
                               struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_queue_elem_info *elem_info;
        int i;

        mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
        for (i = 0; i < q->count; i++) {
                elem_info = mlxsw_pci_queue_elem_info_get(q, i);
                mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
        }
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                             struct mlxsw_pci_queue *q)
{
        int i;
        int err;

        q->consumer_counter = 0;

        for (i = 0; i < q->count; i++) {
                char *elem = mlxsw_pci_queue_elem_get(q, i);

                mlxsw_pci_cqe_owner_set(elem, 1);
        }

        mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
        mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
        mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
        mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
        }
        err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;
        mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
        mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
        return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
                              struct mlxsw_pci_queue *q)
{
        mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
                                     struct mlxsw_pci_queue *q,
                                     u16 consumer_counter_limit,
                                     char *cqe)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *wqe;
        struct sk_buff *skb;
        int i;

        spin_lock(&q->lock);
        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
        skb = elem_info->u.sdq.skb;
        wqe = elem_info->elem;
        for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
                mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        elem_info->u.sdq.skb = NULL;

        if (q->consumer_counter++ != consumer_counter_limit)
                dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
        spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
                                     struct mlxsw_pci_queue *q,
                                     u16 consumer_counter_limit,
                                     char *cqe)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *wqe;
        struct sk_buff *skb;
        struct mlxsw_rx_info rx_info;
        u16 byte_count;
        int err;

        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
        skb = elem_info->u.rdq.skb;
        if (!skb)
                return;
        wqe = elem_info->elem;
        mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

        if (q->consumer_counter++ != consumer_counter_limit)
                dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

        if (mlxsw_pci_cqe_lag_get(cqe)) {
                rx_info.is_lag = true;
                rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
                rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
        } else {
                rx_info.is_lag = false;
                rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
        }

        rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

        byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
        if (mlxsw_pci_cqe_crc_get(cqe))
                byte_count -= ETH_FCS_LEN;
        skb_put(skb, byte_count);
        mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

        memset(wqe, 0, q->elem_size);
        err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
        if (err)
                dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
        /* Everything is set up, ring doorbell to pass elem to HW */
        q->producer_counter++;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}


static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
        return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
}

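/* Process completions with a budget of half the ring per run; the doorbell
 * is rung and the CQ re-armed only if at least one CQE was consumed.
 */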
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
        struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
        struct mlxsw_pci *mlxsw_pci = q->pci;
        char *cqe;
        int items = 0;
        int credits = q->count >> 1;

        while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
                u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
                u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
                u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);

                if (sendq) {
                        struct mlxsw_pci_queue *sdq;

                        sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
                        mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
                                                 wqe_counter, cqe);
                        q->u.cq.comp_sdq_count++;
                } else {
                        struct mlxsw_pci_queue *rdq;

                        rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
                        mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
                                                 wqe_counter, cqe);
                        q->u.cq.comp_rdq_count++;
                }
                if (++items == credits)
                        break;
        }
        if (items) {
                mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
                mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
        }
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                             struct mlxsw_pci_queue *q)
{
        int i;
        int err;

        q->consumer_counter = 0;

        for (i = 0; i < q->count; i++) {
                char *elem = mlxsw_pci_queue_elem_get(q, i);

                mlxsw_pci_eqe_owner_set(elem, 1);
        }

        mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
        mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
        mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
        }
        err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;
        mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
        mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
        return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
                              struct mlxsw_pci_queue *q)
{
        mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

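/* Command completions arrive as EQEs once cmd.nopoll is set; stash the
 * status and the 64-bit output parameter, then wake whoever is sleeping on
 * cmd.wait (the wait side lives in the command-exec path, beyond this
 * excerpt).
 */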
static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
        mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
        mlxsw_pci->cmd.comp.out_param =
                ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
                mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
        mlxsw_pci->cmd.wait_done = true;
        wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
        return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
}

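/* Drain events with the same half-ring budget as the CQ tasklet, collect
 * completion CQ numbers into a bitmap, and only after ringing and re-arming
 * the EQ schedule the tasklets of the affected CQs.
 */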
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
        struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
        struct mlxsw_pci *mlxsw_pci = q->pci;
        u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
        unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
        char *eqe;
        u8 cqn;
        bool cq_handle = false;
        int items = 0;
        int credits = q->count >> 1;

        memset(&active_cqns, 0, sizeof(active_cqns));

        while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
                u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);

                switch (event_type) {
                case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
                        mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
                        q->u.eq.ev_cmd_count++;
                        break;
                case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
                        cqn = mlxsw_pci_eqe_cqn_get(eqe);
                        set_bit(cqn, active_cqns);
                        cq_handle = true;
                        q->u.eq.ev_comp_count++;
                        break;
                default:
                        q->u.eq.ev_other_count++;
                }
                if (++items == credits)
                        break;
        }
        if (items) {
                mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
                mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
        }

        if (!cq_handle)
                return;
        for_each_set_bit(cqn, active_cqns, cq_count) {
                q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
                mlxsw_pci_queue_tasklet_schedule(q);
        }
}

struct mlxsw_pci_queue_ops {
        const char *name;
        enum mlxsw_pci_queue_type type;
        int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
                    struct mlxsw_pci_queue *q);
        void (*fini)(struct mlxsw_pci *mlxsw_pci,
                     struct mlxsw_pci_queue *q);
        void (*tasklet)(unsigned long data);
        u16 elem_count;
        u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_SDQ,
        .init           = mlxsw_pci_sdq_init,
        .fini           = mlxsw_pci_sdq_fini,
        .elem_count     = MLXSW_PCI_WQE_COUNT,
        .elem_size      = MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_RDQ,
        .init           = mlxsw_pci_rdq_init,
        .fini           = mlxsw_pci_rdq_fini,
        .elem_count     = MLXSW_PCI_WQE_COUNT,
        .elem_size      = MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_CQ,
        .init           = mlxsw_pci_cq_init,
        .fini           = mlxsw_pci_cq_fini,
        .tasklet        = mlxsw_pci_cq_tasklet,
        .elem_count     = MLXSW_PCI_CQE_COUNT,
        .elem_size      = MLXSW_PCI_CQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_EQ,
        .init           = mlxsw_pci_eq_init,
        .fini           = mlxsw_pci_eq_fini,
        .tasklet        = mlxsw_pci_eq_tasklet,
        .elem_count     = MLXSW_PCI_EQE_COUNT,
        .elem_size      = MLXSW_PCI_EQE_SIZE
};

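/* Common initialization for all queue types: allocate one MLXSW_PCI_AQ_SIZE
 * DMA-coherent area per queue plus a parallel elem_info array, then issue
 * the type-specific SW2HW command with a zeroed mailbox.
 */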
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                const struct mlxsw_pci_queue_ops *q_ops,
                                struct mlxsw_pci_queue *q, u8 q_num)
{
        struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
        int i;
        int err;

        spin_lock_init(&q->lock);
        q->num = q_num;
        q->count = q_ops->elem_count;
        q->elem_size = q_ops->elem_size;
        q->type = q_ops->type;
        q->pci = mlxsw_pci;

        if (q_ops->tasklet)
                tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

        mem_item->size = MLXSW_PCI_AQ_SIZE;
        mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
                                             mem_item->size,
                                             &mem_item->mapaddr);
        if (!mem_item->buf)
                return -ENOMEM;
        memset(mem_item->buf, 0, mem_item->size);

        q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
        if (!q->elem_info) {
                err = -ENOMEM;
                goto err_elem_info_alloc;
        }

        /* Initialize the per-element info array with pointers into the
         * DMA-mapped element memory, for easy access later.
         */
        for (i = 0; i < q->count; i++) {
                struct mlxsw_pci_queue_elem_info *elem_info;

                elem_info = mlxsw_pci_queue_elem_info_get(q, i);
                elem_info->elem =
                        __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
        }

        mlxsw_cmd_mbox_zero(mbox);
        err = q_ops->init(mlxsw_pci, mbox, q);
        if (err)
                goto err_q_ops_init;
        return 0;

err_q_ops_init:
        kfree(q->elem_info);
err_elem_info_alloc:
        pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
                            mem_item->buf, mem_item->mapaddr);
        return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
                                 const struct mlxsw_pci_queue_ops *q_ops,
                                 struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

        q_ops->fini(mlxsw_pci, q);
        kfree(q->elem_info);
        pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
                            mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                      const struct mlxsw_pci_queue_ops *q_ops,
                                      u8 num_qs)
{
        struct mlxsw_pci_queue_type_group *queue_group;
        int i;
        int err;

        queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
        queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
        if (!queue_group->q)
                return -ENOMEM;

        for (i = 0; i < num_qs; i++) {
                err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
                                           &queue_group->q[i], i);
                if (err)
                        goto err_queue_init;
        }
        queue_group->count = num_qs;

        return 0;

err_queue_init:
        for (i--; i >= 0; i--)
                mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
        kfree(queue_group->q);
        return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
                                       const struct mlxsw_pci_queue_ops *q_ops)
{
        struct mlxsw_pci_queue_type_group *queue_group;
        int i;

        queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
        for (i = 0; i < queue_group->count; i++)
                mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
        kfree(queue_group->q);
}

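/* Bring up all async queues. EQs come first, then CQs, then SDQs and RDQs:
 * the descriptor queues reference CQs by number, and RDQs take the CQ
 * numbers above the SDQ count (see mlxsw_pci_rdq_init()). Teardown in
 * mlxsw_pci_aqs_fini() runs in the reverse order.
 */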
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        u8 num_sdqs;
        u8 sdq_log2sz;
        u8 num_rdqs;
        u8 rdq_log2sz;
        u8 num_cqs;
        u8 cq_log2sz;
        u8 num_eqs;
        u8 eq_log2sz;
        int err;

        mlxsw_cmd_mbox_zero(mbox);
        err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
        if (err)
                return err;

        num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
        sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
        num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
        rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
        num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
        cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
        num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
        eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

        if (num_sdqs + num_rdqs > num_cqs ||
            num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
                dev_err(&pdev->dev, "Unsupported number of queues\n");
                return -EINVAL;
        }

        if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
            (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
            (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
            (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
                dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
                return -EINVAL;
        }

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
                                         num_eqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize event queues\n");
                return err;
        }

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
                                         num_cqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize completion queues\n");
                goto err_cqs_init;
        }

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
                                         num_sdqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
                goto err_sdqs_init;
        }

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
                                         num_rdqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
                goto err_rdqs_init;
        }

        /* The command interface had to be polled until now; with the event
         * queues initialized, completions can be delivered via EQ instead.
         */
        mlxsw_pci->cmd.nopoll = true;
        return 0;

err_rdqs_init:
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
        return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
        mlxsw_pci->cmd.nopoll = false;
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
                                     char *mbox, int index,
                                     const struct mlxsw_swid_config *swid)
{
        u8 mask = 0;

        if (swid->used_type) {
                mlxsw_cmd_mbox_config_profile_swid_config_type_set(
                        mbox, index, swid->type);
                mask |= 1;
        }
        if (swid->used_properties) {
                mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
                        mbox, index, swid->properties);
                mask |= 2;
        }
        mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                     struct mlxsw_res *res,
                                     u8 query_enabled)
{
        int index, i;
        u64 data;
        u16 id;
        int err;

        /* Not all firmware versions support the resources query. */
        if (!query_enabled)
                return 0;

        mlxsw_cmd_mbox_zero(mbox);

        for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
             index++) {
                err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
                if (err)
                        return err;

                for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
                        id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
                        data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

                        if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
                                return 0;

                        mlxsw_res_parse(res, id, data);
                }
        }

        /* If after MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES queries we still
         * didn't get MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID, something went
         * wrong in the FW.
         */
        return -EIO;
}

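/* Worked example with hypothetical numbers: KVD_SIZE = 1024, linear = 256,
 * double:single parts ratio 1:2, granularity 128. The hash part is
 * 1024 - 256 = 768 entries; double = 768 * 1 / (1 + 2) = 256, already a
 * multiple of 128; single = 1024 - 256 - 256 = 512.
 */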
static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
                                struct mlxsw_res *res)
{
        u32 single_size, double_size, linear_size;

        if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
            !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
            !profile->used_kvd_split_data)
                return -EIO;

        linear_size = profile->kvd_linear_size;

        /* The hash part is what is left of the KVD after the linear part.
         * It is split into single-size and double-size regions according to
         * the parts ratio from the profile. Both sizes must be multiples of
         * the granularity from the profile.
         */
        double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
        double_size *= profile->kvd_hash_double_parts;
        double_size /= profile->kvd_hash_double_parts +
                       profile->kvd_hash_single_parts;
        double_size /= profile->kvd_hash_granularity;
        double_size *= profile->kvd_hash_granularity;
        single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
                      linear_size;

        /* Check that the computed sizes are valid. */
        if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
            double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
            MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
                return -EIO;

        MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
        MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
        MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

        return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                    const struct mlxsw_config_profile *profile,
                                    struct mlxsw_res *res)
{
        int i;
        int err;

        mlxsw_cmd_mbox_zero(mbox);

        if (profile->used_max_vepa_channels) {
                mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
                        mbox, profile->max_vepa_channels);
        }
        if (profile->used_max_mid) {
                mlxsw_cmd_mbox_config_profile_set_max_mid_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_mid_set(
                        mbox, profile->max_mid);
        }
        if (profile->used_max_pgt) {
                mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_pgt_set(
                        mbox, profile->max_pgt);
        }
        if (profile->used_max_system_port) {
                mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_system_port_set(
                        mbox, profile->max_system_port);
        }
        if (profile->used_max_vlan_groups) {
                mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
                        mbox, profile->max_vlan_groups);
        }
        if (profile->used_max_regions) {
                mlxsw_cmd_mbox_config_profile_set_max_regions_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_regions_set(
                        mbox, profile->max_regions);
        }
        if (profile->used_flood_tables) {
                mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
                        mbox, profile->max_flood_tables);
                mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
                        mbox, profile->max_vid_flood_tables);
                mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
                        mbox, profile->max_fid_offset_flood_tables);
                mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
                        mbox, profile->fid_offset_flood_table_size);
                mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
                        mbox, profile->max_fid_flood_tables);
                mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
                        mbox, profile->fid_flood_table_size);
        }
        if (profile->used_flood_mode) {
                mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_flood_mode_set(
                        mbox, profile->flood_mode);
        }
        if (profile->used_max_ib_mc) {
                mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
                        mbox, profile->max_ib_mc);
        }
        if (profile->used_max_pkey) {
                mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_pkey_set(
                        mbox, profile->max_pkey);
        }
        if (profile->used_ar_sec) {
                mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_ar_sec_set(
                        mbox, profile->ar_sec);
        }
        if (profile->used_adaptive_routing_group_cap) {
                mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
                        mbox, profile->adaptive_routing_group_cap);
        }
        if (MLXSW_RES_VALID(res, KVD_SIZE)) {
                err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
                if (err)
                        return err;

                mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
                mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
                                        MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
                mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
                                                                           1);
                mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
                                        MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
                mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
                                                                mbox, 1);
                mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
                                        MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
        }

        for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
                mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
                                                     &profile->swid_config[i]);

        return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
        struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
        int err;

        mlxsw_cmd_mbox_zero(mbox);
        err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
        if (err)
                return err;
        mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
        mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
        return 0;
}

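/* Pass the firmware area to the device page by page. The MAP_FA mailbox
 * holds at most MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries, so the command is
 * issued whenever the mailbox fills up, and once more for any remainder.
 */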
1225static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1226                                  u16 num_pages)
1227{
1228        struct mlxsw_pci_mem_item *mem_item;
1229        int nent = 0;
1230        int i;
1231        int err;
1232
1233        mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
1234                                           GFP_KERNEL);
1235        if (!mlxsw_pci->fw_area.items)
1236                return -ENOMEM;
1237        mlxsw_pci->fw_area.count = num_pages;
1238
1239        mlxsw_cmd_mbox_zero(mbox);
1240        for (i = 0; i < num_pages; i++) {
1241                mem_item = &mlxsw_pci->fw_area.items[i];
1242
1243                mem_item->size = MLXSW_PCI_PAGE_SIZE;
1244                mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
1245                                                     mem_item->size,
1246                                                     &mem_item->mapaddr);
1247                if (!mem_item->buf) {
1248                        err = -ENOMEM;
1249                        goto err_alloc;
1250                }
1251                mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
1252                mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
1253                if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
1254                        err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1255                        if (err)
1256                                goto err_cmd_map_fa;
1257                        nent = 0;
1258                        mlxsw_cmd_mbox_zero(mbox);
1259                }
1260        }
1261
1262        if (nent) {
1263                err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1264                if (err)
1265                        goto err_cmd_map_fa;
1266        }
1267
1268        return 0;
1269
1270err_cmd_map_fa:
1271err_alloc:
1272        for (i--; i >= 0; i--) {
1273                mem_item = &mlxsw_pci->fw_area.items[i];
1274
1275                pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
1276                                    mem_item->buf, mem_item->mapaddr);
1277        }
1278        kfree(mlxsw_pci->fw_area.items);
1279        return err;
1280}
1281
1282static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
1283{
1284        struct mlxsw_pci_mem_item *mem_item;
1285        int i;
1286
1287        mlxsw_cmd_unmap_fa(mlxsw_pci->core);
1288
1289        for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
1290                mem_item = &mlxsw_pci->fw_area.items[i];
1291
1292                pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
1293                                    mem_item->buf, mem_item->mapaddr);
1294        }
1295        kfree(mlxsw_pci->fw_area.items);
1296}
1297
1298static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
1299{
1300        struct mlxsw_pci *mlxsw_pci = dev_id;
1301        struct mlxsw_pci_queue *q;
1302        int i;
1303
1304        for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
1305                q = mlxsw_pci_eq_get(mlxsw_pci, i);
1306                mlxsw_pci_queue_tasklet_schedule(q);
1307        }
1308        return IRQ_HANDLED;
1309}
1310
1311static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
1312                                struct mlxsw_pci_mem_item *mbox)
1313{
1314        struct pci_dev *pdev = mlxsw_pci->pdev;
1315        int err = 0;
1316
1317        mbox->size = MLXSW_CMD_MBOX_SIZE;
1318        mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
1319                                         &mbox->mapaddr);
1320        if (!mbox->buf) {
1321                dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
1322                err = -ENOMEM;
1323        }
1324
1325        return err;
1326}
1327
1328static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
1329                                struct mlxsw_pci_mem_item *mbox)
1330{
1331        struct pci_dev *pdev = mlxsw_pci->pdev;
1332
1333        pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
1334                            mbox->mapaddr);
1335}
1336
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
                          const struct mlxsw_config_profile *profile,
                          struct mlxsw_res *res)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        struct pci_dev *pdev = mlxsw_pci->pdev;
        char *mbox;
        u16 num_pages;
        int err;

        mutex_init(&mlxsw_pci->cmd.lock);
        init_waitqueue_head(&mlxsw_pci->cmd.wait);

        mlxsw_pci->core = mlxsw_core;

        mbox = mlxsw_cmd_mbox_alloc();
        if (!mbox)
                return -ENOMEM;

        err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
        if (err)
                goto mbox_put;

        err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
        if (err)
                goto err_out_mbox_alloc;

        err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
        if (err)
                goto err_query_fw;

        mlxsw_pci->bus_info.fw_rev.major =
                mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
        mlxsw_pci->bus_info.fw_rev.minor =
                mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
        mlxsw_pci->bus_info.fw_rev.subminor =
                mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

        if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
                dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
                err = -EINVAL;
                goto err_iface_rev;
        }
        if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
                dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
                err = -EINVAL;
                goto err_doorbell_page_bar;
        }

        mlxsw_pci->doorbell_offset =
                mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

        num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
        err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
        if (err)
                goto err_fw_area_init;

        err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
        if (err)
                goto err_boardinfo;

        err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res,
                                        profile->resource_query_enable);
        if (err)
                goto err_query_resources;

        err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
        if (err)
                goto err_config_profile;

        err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
        if (err)
                goto err_aqs_init;

        err = request_irq(pci_irq_vector(pdev, 0),
                          mlxsw_pci_eq_irq_handler, 0,
                          mlxsw_pci->bus_info.device_kind, mlxsw_pci);
        if (err) {
                dev_err(&pdev->dev, "IRQ request failed\n");
                goto err_request_eq_irq;
        }

        goto mbox_put;

err_request_eq_irq:
        mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_query_resources:
err_boardinfo:
        mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
        mlxsw_cmd_mbox_free(mbox);
        return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;

        free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
        mlxsw_pci_aqs_fini(mlxsw_pci);
        mlxsw_pci_fw_area_fini(mlxsw_pci);
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}

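/* Spread transmission across the available send descriptor queues by
 * local port number; e.g. with 3 SDQs, local port 5 maps to SDQ
 * 5 % 3 == 2.
 */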
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
                   const struct mlxsw_tx_info *tx_info)
{
        u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

        return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

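/* Busy means the picked SDQ has no free element at the producer index;
 * presumably the core uses this to back off before calling
 * mlxsw_pci_skb_transmit().
 */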
static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
                                        const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

        return !mlxsw_pci_queue_elem_info_producer_get(q);
}

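/* Post a single skb on the picked SDQ: linearize it if it has more
 * fragments than the WQE has scatter/gather entries (one entry is
 * reserved for the linear part), DMA-map the head and each fragment,
 * zero the byte counts of the remaining unused entries, then bump the
 * producer counter and ring the doorbell. On a mapping failure, every
 * fragment mapped so far is unmapped before returning.
 */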
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
                                  const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        struct mlxsw_pci_queue *q;
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *wqe;
        int i;
        int err;

        if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
                err = skb_linearize(skb);
                if (err)
                        return err;
        }

        q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
        spin_lock_bh(&q->lock);
        elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
        if (!elem_info) {
                /* queue is full */
                err = -EAGAIN;
                goto unlock;
        }
        elem_info->u.sdq.skb = skb;

        wqe = elem_info->elem;
        mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
        mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
        mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

        err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
        if (err)
                goto unlock;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
                                             skb_frag_address(frag),
                                             skb_frag_size(frag),
                                             DMA_TO_DEVICE);
                if (err)
                        goto unmap_frags;
        }

        /* Set the byte count of unused SG entries to zero. */
        for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
                mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

        /* Everything is set up, ring producer doorbell to get HW going */
        q->producer_counter++;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

        goto unlock;

unmap_frags:
        for (; i >= 0; i--)
                mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
        spin_unlock_bh(&q->lock);
        return err;
}

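/* Execute a single command through the Command Interface Register
 * (CIR) block: program the in/out mailbox DMA addresses, the input
 * modifier and a zero token, then set the GO bit together with the
 * opcode in the control word; the wmb() orders the former before the
 * latter. In polling mode, the GO bit is polled until it clears and
 * the status is taken from the upper bits of the control word; in
 * event mode, the caller sleeps until the EQ completion handler sets
 * cmd.wait_done and wakes cmd.wait.
 */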
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
                              u32 in_mod, bool out_mbox_direct,
                              char *in_mbox, size_t in_mbox_size,
                              char *out_mbox, size_t out_mbox_size,
                              u8 *p_status)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
        dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
        bool evreq = mlxsw_pci->cmd.nopoll;
        unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
        bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
        int err;

        *p_status = MLXSW_CMD_STATUS_OK;

        err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
        if (err)
                return err;

        if (in_mbox)
                memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
        mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
        mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

        mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
        mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

        mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
        mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

        *p_wait_done = false;

        wmb(); /* everything must be written before we write the control register */
        mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
                          MLXSW_PCI_CIR_CTRL_GO_BIT |
                          (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
                          (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
                          opcode);

        if (!evreq) {
                unsigned long end;

                end = jiffies + timeout;
                do {
                        u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

                        if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
                                *p_wait_done = true;
                                *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
                                break;
                        }
                        cond_resched();
                } while (time_before(jiffies, end));
        } else {
                wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
                *p_status = mlxsw_pci->cmd.comp.status;
        }

        err = 0;
        if (*p_wait_done) {
                if (*p_status)
                        err = -EIO;
        } else {
                err = -ETIMEDOUT;
        }

        if (!err && out_mbox && out_mbox_direct) {
                /* Some commands don't use output param as address to mailbox
                 * but they store output directly into registers. In that case,
                 * copy registers into mbox buffer.
                 */
                __be32 tmp;

                if (!evreq) {
                        tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
                                                           CIR_OUT_PARAM_HI));
                        memcpy(out_mbox, &tmp, sizeof(tmp));
                        tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
                                                           CIR_OUT_PARAM_LO));
                        memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
                }
        } else if (!err && out_mbox) {
                memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
        }

        mutex_unlock(&mlxsw_pci->cmd.lock);

        return err;
}

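/* The bus operations handed to mlxsw_core; MLXSW_BUS_F_TXRX indicates
 * that this bus transmits and receives packets itself.
 */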
static const struct mlxsw_bus mlxsw_pci_bus = {
        .kind                   = "pci",
        .init                   = mlxsw_pci_init,
        .fini                   = mlxsw_pci_fini,
        .skb_transmit_busy      = mlxsw_pci_skb_transmit_busy,
        .skb_transmit           = mlxsw_pci_skb_transmit,
        .cmd_exec               = mlxsw_pci_cmd_exec,
        .features               = MLXSW_BUS_F_TXRX,
};

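/* Issue a software reset and wait for the firmware to become ready
 * again. SwitchX-2 provides no FW_READY indication, so for it we
 * simply sleep for the full timeout; other devices poll FW_READY for
 * the magic ready value. Note that a timeout is not reported here:
 * the loop falls through and 0 is returned either way.
 */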
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
                              const struct pci_device_id *id)
{
        unsigned long end;

        mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
        if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
                msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
                return 0;
        }

        wmb(); /* the reset must be written before we poll FW_READY */
        end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
        do {
                u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

                if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
                        break;
                cond_resched();
        } while (time_before(jiffies, end));
        return 0;
}

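/* PCI probe: enable the device, claim its regions, select a 64-bit DMA
 * mask (falling back to 32-bit), validate and map BAR 0, reset the
 * device, allocate a single MSI-X vector and register the bus device
 * with mlxsw_core, which in turn invokes mlxsw_pci_init() above. The
 * error labels unwind in reverse order, mirroring mlxsw_pci_remove().
 */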
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        const char *driver_name = pdev->driver->name;
        struct mlxsw_pci *mlxsw_pci;
        int err;

        mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
        if (!mlxsw_pci)
                return -ENOMEM;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_device failed\n");
                goto err_pci_enable_device;
        }

        err = pci_request_regions(pdev, driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto err_pci_request_regions;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        }

        if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
                dev_err(&pdev->dev, "invalid PCI region size\n");
                err = -EINVAL;
                goto err_pci_resource_len_check;
        }

        mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
                                     pci_resource_len(pdev, 0));
        if (!mlxsw_pci->hw_addr) {
                dev_err(&pdev->dev, "ioremap failed\n");
                err = -EIO;
                goto err_ioremap;
        }
        pci_set_master(pdev);

        mlxsw_pci->pdev = pdev;
        pci_set_drvdata(pdev, mlxsw_pci);

        err = mlxsw_pci_sw_reset(mlxsw_pci, id);
        if (err) {
                dev_err(&pdev->dev, "Software reset failed\n");
                goto err_sw_reset;
        }

        err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(&pdev->dev, "MSI-X init failed\n");
                goto err_msix_init;
        }

        mlxsw_pci->bus_info.device_kind = driver_name;
        mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
        mlxsw_pci->bus_info.dev = &pdev->dev;

        err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
                                             &mlxsw_pci_bus, mlxsw_pci);
        if (err) {
                dev_err(&pdev->dev, "cannot register bus device\n");
                goto err_bus_device_register;
        }

        return 0;

err_bus_device_register:
        pci_free_irq_vectors(mlxsw_pci->pdev);
err_msix_init:
err_sw_reset:
        iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
        pci_release_regions(pdev);
err_pci_request_regions:
        pci_disable_device(pdev);
err_pci_enable_device:
        kfree(mlxsw_pci);
        return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
        struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

        mlxsw_core_bus_device_unregister(mlxsw_pci->core);
        pci_free_irq_vectors(mlxsw_pci->pdev);
        iounmap(mlxsw_pci->hw_addr);
        pci_release_regions(mlxsw_pci->pdev);
        pci_disable_device(mlxsw_pci->pdev);
        kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
        pci_driver->probe = mlxsw_pci_probe;
        pci_driver->remove = mlxsw_pci_remove;
        return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
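/* Illustrative sketch only, modeled on how the Spectrum driver
 * registers itself; the identifiers below are examples and not part
 * of this file:
 *
 *      static const struct pci_device_id my_asic_pci_id_table[] = {
 *              {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
 *              {0, },
 *      };
 *
 *      static struct pci_driver my_asic_pci_driver = {
 *              .name = "my_asic",
 *              .id_table = my_asic_pci_id_table,
 *      };
 *
 *      static int __init my_asic_module_init(void)
 *      {
 *              return mlxsw_pci_driver_register(&my_asic_pci_driver);
 *      }
 *
 * mlxsw_pci_driver_register() fills in .probe and .remove before
 * registering with the PCI core, so a caller supplies only the name
 * and the ID table.
 */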

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
        pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

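/* The module init/exit hooks are intentionally empty: this module only
 * provides the register/unregister helpers above, and the PCI drivers
 * themselves are registered by the device-specific mlxsw modules.
 */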
static int __init mlxsw_pci_module_init(void)
{
        return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");