/* linux/drivers/net/ethernet/mellanox/mlx5/core/main.c */
   1/*
   2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <generated/utsrelease.h>
  34#include <linux/highmem.h>
  35#include <linux/module.h>
  36#include <linux/init.h>
  37#include <linux/errno.h>
  38#include <linux/pci.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/slab.h>
  41#include <linux/io-mapping.h>
  42#include <linux/interrupt.h>
  43#include <linux/delay.h>
  44#include <linux/mlx5/driver.h>
  45#include <linux/mlx5/cq.h>
  46#include <linux/mlx5/qp.h>
  47#include <linux/debugfs.h>
  48#include <linux/kmod.h>
  49#include <linux/mlx5/mlx5_ifc.h>
  50#include <linux/mlx5/vport.h>
  51#ifdef CONFIG_RFS_ACCEL
  52#include <linux/cpu_rmap.h>
  53#endif
  54#include <linux/version.h>
  55#include <net/devlink.h>
  56#include "mlx5_core.h"
  57#include "lib/eq.h"
  58#include "fs_core.h"
  59#include "lib/mpfs.h"
  60#include "eswitch.h"
  61#include "devlink.h"
  62#include "fw_reset.h"
  63#include "lib/mlx5.h"
  64#include "fpga/core.h"
  65#include "fpga/ipsec.h"
  66#include "accel/ipsec.h"
  67#include "accel/tls.h"
  68#include "lib/clock.h"
  69#include "lib/vxlan.h"
  70#include "lib/geneve.h"
  71#include "lib/devcom.h"
  72#include "lib/pci_vsc.h"
  73#include "diag/fw_tracer.h"
  74#include "ecpf.h"
  75#include "lib/hv_vhca.h"
  76#include "diag/rsc_dump.h"
  77#include "sf/vhca_event.h"
  78#include "sf/dev/dev.h"
  79#include "sf/sf.h"
  80#include "mlx5_irq.h"
  81
  82MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
  83MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
  84MODULE_LICENSE("Dual BSD/GPL");
  85MODULE_VERSION(UTS_RELEASE);
  86
/* Command-interface debug bitmask; 0644 permissions make it writable at
 * runtime through the module parameter interface.
 */
unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

/* Index into the profile[] table below; read-only (0444) after load. */
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

/* Software owner id handed to firmware; zero-initialized here. */
static u32 sw_owner_id[4];

/* Wire encodings for the 8B atomic requestor endianness mode. */
enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
 101
/* Resource profiles selected by the prof_sel module parameter:
 * [0] - no overrides, use device defaults
 * [1] - cap QP count at 2^12
 * [2] - cap QP count at 2^18 and pre-size the MR cache buckets
 *       (buckets 0-11 large, 12-15 progressively smaller)
 */
static struct mlx5_profile profile[] = {
	[0] = {
		.mask           = 0,
	},
	[1] = {
		.mask           = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp     = 12,
	},
	[2] = {
		.mask           = MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp     = 18,
		.mr_cache[0]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[1]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[2]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[3]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[4]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[5]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[6]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[7]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[8]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[9]    = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[10]   = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[11]   = {
			.size   = 500,
			.limit  = 250
		},
		.mr_cache[12]   = {
			.size   = 64,
			.limit  = 32
		},
		.mr_cache[13]   = {
			.size   = 32,
			.limit  = 16
		},
		.mr_cache[14]   = {
			.size   = 16,
			.limit  = 8
		},
		.mr_cache[15]   = {
			.size   = 8,
			.limit  = 4
		},
	},
};
 180
/* Firmware initialization polling parameters (milliseconds). */
#define FW_INIT_TIMEOUT_MILI            2000
#define FW_INIT_WAIT_MS                 2
#define FW_PRE_INIT_TIMEOUT_MILI        120000
#define FW_INIT_WARN_MESSAGE_INTERVAL   20000

/* Read the "initializing" bit (bit 31) of the device init segment:
 * non-zero while firmware is still booting.
 */
static int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
 190
/* Poll the init segment until firmware finishes initializing.
 *
 * @max_wait_mili:  total time to wait before giving up.
 * @warn_time_mili: if non-zero, emit a progress warning at this interval.
 *
 * Returns 0 once firmware is ready, -EBUSY on timeout.
 */
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	/* Compile-time sanity: the warn interval must fit inside the
	 * pre-init timeout, otherwise the warning could never fire.
	 */
	BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			/* Report remaining time until the hard timeout. */
			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
				       jiffies_to_msecs(end - warn) / 1000);
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
 215
 216static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
 217{
 218        int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
 219                                              driver_version);
 220        u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
 221        int remaining_size = driver_ver_sz;
 222        char *string;
 223
 224        if (!MLX5_CAP_GEN(dev, driver_version))
 225                return;
 226
 227        string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
 228
 229        strncpy(string, "Linux", remaining_size);
 230
 231        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
 232        strncat(string, ",", remaining_size);
 233
 234        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
 235        strncat(string, KBUILD_MODNAME, remaining_size);
 236
 237        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
 238        strncat(string, ",", remaining_size);
 239
 240        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
 241
 242        snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
 243                 (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff),
 244                 (u16)(LINUX_VERSION_CODE & 0xffff));
 245
 246        /*Send the command*/
 247        MLX5_SET(set_driver_version_in, in, opcode,
 248                 MLX5_CMD_OP_SET_DRIVER_VERSION);
 249
 250        mlx5_cmd_exec_in(dev, set_driver_version, in);
 251}
 252
 253static int set_dma_caps(struct pci_dev *pdev)
 254{
 255        int err;
 256
 257        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 258        if (err) {
 259                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
 260                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 261                if (err) {
 262                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
 263                        return err;
 264                }
 265        }
 266
 267        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 268        if (err) {
 269                dev_warn(&pdev->dev,
 270                         "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
 271                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 272                if (err) {
 273                        dev_err(&pdev->dev,
 274                                "Can't set consistent PCI DMA mask, aborting\n");
 275                        return err;
 276                }
 277        }
 278
 279        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
 280        return err;
 281}
 282
 283static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
 284{
 285        struct pci_dev *pdev = dev->pdev;
 286        int err = 0;
 287
 288        mutex_lock(&dev->pci_status_mutex);
 289        if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
 290                err = pci_enable_device(pdev);
 291                if (!err)
 292                        dev->pci_status = MLX5_PCI_STATUS_ENABLED;
 293        }
 294        mutex_unlock(&dev->pci_status_mutex);
 295
 296        return err;
 297}
 298
 299static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
 300{
 301        struct pci_dev *pdev = dev->pdev;
 302
 303        mutex_lock(&dev->pci_status_mutex);
 304        if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
 305                pci_disable_device(pdev);
 306                dev->pci_status = MLX5_PCI_STATUS_DISABLED;
 307        }
 308        mutex_unlock(&dev->pci_status_mutex);
 309}
 310
 311static int request_bar(struct pci_dev *pdev)
 312{
 313        int err = 0;
 314
 315        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 316                dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
 317                return -ENODEV;
 318        }
 319
 320        err = pci_request_regions(pdev, KBUILD_MODNAME);
 321        if (err)
 322                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
 323
 324        return err;
 325}
 326
/* Release the PCI regions claimed by request_bar(). */
static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
 331
/* Layout of the HOST_ENDIANNESS access register: one endianness byte
 * followed by 15 reserved bytes.
 */
struct mlx5_reg_host_endianness {
	u8      he;
	u8      rsvd[15];
};

/* Build a mask of "size" consecutive bits starting at bit "pos". */
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	/* Capability bits the driver is permitted to modify. */
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
 343
 344static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
 345{
 346        switch (size) {
 347        case 128:
 348                return 0;
 349        case 256:
 350                return 1;
 351        case 512:
 352                return 2;
 353        case 1024:
 354                return 3;
 355        case 2048:
 356                return 4;
 357        case 4096:
 358                return 5;
 359        default:
 360                mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
 361                return 0;
 362        }
 363}
 364
 365static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
 366                                   enum mlx5_cap_type cap_type,
 367                                   enum mlx5_cap_mode cap_mode)
 368{
 369        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
 370        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
 371        void *out, *hca_caps;
 372        u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
 373        int err;
 374
 375        memset(in, 0, sizeof(in));
 376        out = kzalloc(out_sz, GFP_KERNEL);
 377        if (!out)
 378                return -ENOMEM;
 379
 380        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 381        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
 382        err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
 383        if (err) {
 384                mlx5_core_warn(dev,
 385                               "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
 386                               cap_type, cap_mode, err);
 387                goto query_ex;
 388        }
 389
 390        hca_caps =  MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 391
 392        switch (cap_mode) {
 393        case HCA_CAP_OPMOD_GET_MAX:
 394                memcpy(dev->caps.hca_max[cap_type], hca_caps,
 395                       MLX5_UN_SZ_BYTES(hca_cap_union));
 396                break;
 397        case HCA_CAP_OPMOD_GET_CUR:
 398                memcpy(dev->caps.hca_cur[cap_type], hca_caps,
 399                       MLX5_UN_SZ_BYTES(hca_cap_union));
 400                break;
 401        default:
 402                mlx5_core_warn(dev,
 403                               "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
 404                               cap_type, cap_mode);
 405                err = -EINVAL;
 406                break;
 407        }
 408query_ex:
 409        kfree(out);
 410        return err;
 411}
 412
 413int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
 414{
 415        int ret;
 416
 417        ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
 418        if (ret)
 419                return ret;
 420        return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
 421}
 422
/* Issue SET_HCA_CAP for the capability group selected by opmod; the
 * group selector lives in the upper bits of op_mod, hence the shift.
 */
static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}
 429
 430static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
 431{
 432        void *set_hca_cap;
 433        int req_endianness;
 434        int err;
 435
 436        if (!MLX5_CAP_GEN(dev, atomic))
 437                return 0;
 438
 439        err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
 440        if (err)
 441                return err;
 442
 443        req_endianness =
 444                MLX5_CAP_ATOMIC(dev,
 445                                supported_atomic_req_8B_endianness_mode_1);
 446
 447        if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
 448                return 0;
 449
 450        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
 451
 452        /* Set requestor to host endianness */
 453        MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
 454                 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
 455
 456        return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
 457}
 458
/* Propagate on-demand-paging (ODP) capabilities: for every per-transport
 * ODP feature whose max capability is non-zero, enable it in the current
 * capabilities and push the result to firmware.  No-op unless ODP is
 * compiled in and the device supports paging (pg).
 */
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	bool do_set = false;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	/* Start from the current ODP caps, then overlay max-capable bits. */
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
	       MLX5_ST_SZ_BYTES(odp_cap));

/* Enable "field" when its max capability is set, and record (via do_set)
 * whether anything actually changed so the command can be skipped.
 */
#define ODP_CAP_SET_MAX(dev, field)                                            \
	do {                                                                   \
		u32 _res = MLX5_CAP_ODP_MAX(dev, field);                       \
		if (_res) {                                                    \
			do_set = true;                                         \
			MLX5_SET(odp_cap, set_hca_cap, field, _res);           \
		}                                                              \
	} while (0)

	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

	/* Nothing changed - skip the SET_HCA_CAP command entirely. */
	if (!do_set)
		return 0;

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
 506
/* Configure the general HCA capability group (pkey table size,
 * log_max_qp, cmdif checksum, 4K UARs, cache-line hint, DCT, vhca
 * ports, dynamic VF MSI-X, ...) from the selected profile and the
 * device's maximum capabilities, then push it with SET_HCA_CAP.
 */
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
	struct mlx5_profile *prof = &dev->profile;
	void *set_hca_cap;
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	/* Start from the current caps and modify selected fields below. */
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       prof->log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4K.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	/* Tell firmware whether the host cache line is >= 128 bytes. */
	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
		MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
		MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);

	if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
		MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);

	mlx5_vhca_state_cap_handle(dev, set_hca_cap);

	if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
 583
 584static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
 585{
 586        void *set_hca_cap;
 587        int err;
 588
 589        if (!MLX5_CAP_GEN(dev, roce))
 590                return 0;
 591
 592        err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
 593        if (err)
 594                return err;
 595
 596        if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
 597            !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
 598                return 0;
 599
 600        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
 601        memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
 602               MLX5_ST_SZ_BYTES(roce_cap));
 603        MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);
 604
 605        err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
 606        return err;
 607}
 608
 609static int set_hca_cap(struct mlx5_core_dev *dev)
 610{
 611        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
 612        void *set_ctx;
 613        int err;
 614
 615        set_ctx = kzalloc(set_sz, GFP_KERNEL);
 616        if (!set_ctx)
 617                return -ENOMEM;
 618
 619        err = handle_hca_cap(dev, set_ctx);
 620        if (err) {
 621                mlx5_core_err(dev, "handle_hca_cap failed\n");
 622                goto out;
 623        }
 624
 625        memset(set_ctx, 0, set_sz);
 626        err = handle_hca_cap_atomic(dev, set_ctx);
 627        if (err) {
 628                mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
 629                goto out;
 630        }
 631
 632        memset(set_ctx, 0, set_sz);
 633        err = handle_hca_cap_odp(dev, set_ctx);
 634        if (err) {
 635                mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
 636                goto out;
 637        }
 638
 639        memset(set_ctx, 0, set_sz);
 640        err = handle_hca_cap_roce(dev, set_ctx);
 641        if (err) {
 642                mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
 643                goto out;
 644        }
 645
 646out:
 647        kfree(set_ctx);
 648        return err;
 649}
 650
 651static int set_hca_ctrl(struct mlx5_core_dev *dev)
 652{
 653        struct mlx5_reg_host_endianness he_in;
 654        struct mlx5_reg_host_endianness he_out;
 655        int err;
 656
 657        if (!mlx5_core_is_pf(dev))
 658                return 0;
 659
 660        memset(&he_in, 0, sizeof(he_in));
 661        he_in.he = MLX5_SET_HOST_ENDIANNESS;
 662        err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
 663                                        &he_out, sizeof(he_out),
 664                                        MLX5_REG_HOST_ENDIANNESS, 0, 1);
 665        return err;
 666}
 667
 668static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
 669{
 670        int ret = 0;
 671
 672        /* Disable local_lb by default */
 673        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
 674                ret = mlx5_nic_vport_update_local_lb(dev, false);
 675
 676        return ret;
 677}
 678
/* Send ENABLE_HCA for the given function id, tagging the command with
 * whether this device is an embedded-CPU function.
 */
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, enable_hca, in);
}
 689
 690int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
 691{
 692        u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
 693
 694        MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
 695        MLX5_SET(disable_hca_in, in, function_id, func_id);
 696        MLX5_SET(enable_hca_in, in, embedded_cpu_function,
 697                 dev->caps.embedded_cpu);
 698        return mlx5_cmd_exec_in(dev, disable_hca, in);
 699}
 700
/* Negotiate the Interface Step Sequence ID (ISSI) with firmware:
 * prefer ISSI 1 when supported, otherwise accept ISSI 0, and treat a
 * firmware that does not implement QUERY_ISSI as ISSI 0.
 *
 * Returns 0 on success, -EOPNOTSUPP when no common ISSI exists, or a
 * negative errno on command failure.
 */
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
	if (err) {
		u32 syndrome;
		u8 status;

		/* Distinguish "command not supported" (tolerated, ISSI 0)
		 * from a genuine command failure (fatal).
		 */
		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		/* Firmware supports ISSI 1 - switch to it. */
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec_in(dev, set_issi, set_in);
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		/* ISSI 0 (the default) is acceptable - nothing to set. */
		return 0;
	}

	return -EOPNOTSUPP;
}
 749
/* PCI table of mlx5 devices that are unmaintained in RHEL */
static const struct pci_device_id mlx5_core_hw_unsupp_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },			/* BlueField-3 integrated ConnectX-7 network controller */
	{ 0, }							/* sentinel - end of table */
};
 756
/* Bring up PCI-level access to the device: enable it, claim its BARs,
 * configure DMA, opportunistically enable PCIe atomics, and map the
 * firmware initialization segment.  Resources acquired before a failure
 * are released through the goto unwind chain.
 */
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct mlx5_priv *priv = &dev->priv;
	int err = 0;

	mutex_init(&dev->pci_status_mutex);
	pci_set_drvdata(dev->pdev, dev);

	dev->bar_addr = pci_resource_start(pdev, 0);
	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	/* PCIe atomics are best effort: try each completer size and only
	 * log at debug level if none could be enabled.
	 */
	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	/* The init segment sits at the start of BAR0. */
	dev->iseg_base = dev->bar_addr;
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	mlx5_pci_vsc_init(dev);
	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	/* Flag devices listed in the RHEL-unmaintained table above. */
	pci_hw_unmaintained(mlx5_core_hw_unsupp_pci_table, pdev);
	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
	return err;
}
 814
/* Tear down PCI-level access set up by mlx5_pci_init(). */
static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	/* health work might still be active, and it needs pci bar in
	 * order to know the NIC state. Therefore, drain the health WQ
	 * before removing the pci bars
	 */
	mlx5_drain_health_wq(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}
 827
 828static int mlx5_init_once(struct mlx5_core_dev *dev)
 829{
 830        int err;
 831
 832        dev->priv.devcom = mlx5_devcom_register_device(dev);
 833        if (IS_ERR(dev->priv.devcom))
 834                mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
 835                              dev->priv.devcom);
 836
 837        err = mlx5_query_board_id(dev);
 838        if (err) {
 839                mlx5_core_err(dev, "query board id failed\n");
 840                goto err_devcom;
 841        }
 842
 843        err = mlx5_irq_table_init(dev);
 844        if (err) {
 845                mlx5_core_err(dev, "failed to initialize irq table\n");
 846                goto err_devcom;
 847        }
 848
 849        err = mlx5_eq_table_init(dev);
 850        if (err) {
 851                mlx5_core_err(dev, "failed to initialize eq\n");
 852                goto err_irq_cleanup;
 853        }
 854
 855        err = mlx5_events_init(dev);
 856        if (err) {
 857                mlx5_core_err(dev, "failed to initialize events\n");
 858                goto err_eq_cleanup;
 859        }
 860
 861        err = mlx5_fw_reset_init(dev);
 862        if (err) {
 863                mlx5_core_err(dev, "failed to initialize fw reset events\n");
 864                goto err_events_cleanup;
 865        }
 866
 867        mlx5_cq_debugfs_init(dev);
 868
 869        mlx5_init_reserved_gids(dev);
 870
 871        mlx5_init_clock(dev);
 872
 873        dev->vxlan = mlx5_vxlan_create(dev);
 874        dev->geneve = mlx5_geneve_create(dev);
 875
 876        err = mlx5_init_rl_table(dev);
 877        if (err) {
 878                mlx5_core_err(dev, "Failed to init rate limiting\n");
 879                goto err_tables_cleanup;
 880        }
 881
 882        err = mlx5_mpfs_init(dev);
 883        if (err) {
 884                mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
 885                goto err_rl_cleanup;
 886        }
 887
 888        err = mlx5_sriov_init(dev);
 889        if (err) {
 890                mlx5_core_err(dev, "Failed to init sriov %d\n", err);
 891                goto err_mpfs_cleanup;
 892        }
 893
 894        err = mlx5_eswitch_init(dev);
 895        if (err) {
 896                mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
 897                goto err_sriov_cleanup;
 898        }
 899
 900        err = mlx5_fpga_init(dev);
 901        if (err) {
 902                mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
 903                goto err_eswitch_cleanup;
 904        }
 905
 906        err = mlx5_vhca_event_init(dev);
 907        if (err) {
 908                mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
 909                goto err_fpga_cleanup;
 910        }
 911
 912        err = mlx5_sf_hw_table_init(dev);
 913        if (err) {
 914                mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
 915                goto err_sf_hw_table_cleanup;
 916        }
 917
 918        err = mlx5_sf_table_init(dev);
 919        if (err) {
 920                mlx5_core_err(dev, "Failed to init SF table %d\n", err);
 921                goto err_sf_table_cleanup;
 922        }
 923
 924        dev->dm = mlx5_dm_create(dev);
 925        if (IS_ERR(dev->dm))
 926                mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
 927
 928        dev->tracer = mlx5_fw_tracer_create(dev);
 929        dev->hv_vhca = mlx5_hv_vhca_create(dev);
 930        dev->rsc_dump = mlx5_rsc_dump_create(dev);
 931
 932        return 0;
 933
 934err_sf_table_cleanup:
 935        mlx5_sf_hw_table_cleanup(dev);
 936err_sf_hw_table_cleanup:
 937        mlx5_vhca_event_cleanup(dev);
 938err_fpga_cleanup:
 939        mlx5_fpga_cleanup(dev);
 940err_eswitch_cleanup:
 941        mlx5_eswitch_cleanup(dev->priv.eswitch);
 942err_sriov_cleanup:
 943        mlx5_sriov_cleanup(dev);
 944err_mpfs_cleanup:
 945        mlx5_mpfs_cleanup(dev);
 946err_rl_cleanup:
 947        mlx5_cleanup_rl_table(dev);
 948err_tables_cleanup:
 949        mlx5_geneve_destroy(dev->geneve);
 950        mlx5_vxlan_destroy(dev->vxlan);
 951        mlx5_cq_debugfs_cleanup(dev);
 952        mlx5_fw_reset_cleanup(dev);
 953err_events_cleanup:
 954        mlx5_events_cleanup(dev);
 955err_eq_cleanup:
 956        mlx5_eq_table_cleanup(dev);
 957err_irq_cleanup:
 958        mlx5_irq_table_cleanup(dev);
 959err_devcom:
 960        mlx5_devcom_unregister_device(dev->priv.devcom);
 961
 962        return err;
 963}
 964
/* Destroy the SW objects created by mlx5_init_once(), in exact reverse
 * order of creation. Called once per device lifetime.
 */
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_rsc_dump_destroy(dev);
	mlx5_hv_vhca_destroy(dev->hv_vhca);
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_dm_cleanup(dev);
	mlx5_sf_table_cleanup(dev);
	mlx5_sf_hw_table_cleanup(dev);
	mlx5_vhca_event_cleanup(dev);
	mlx5_fpga_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_sriov_cleanup(dev);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_irq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}
 990
/* Bring the HCA function up to the point where commands and capabilities
 * are usable: wait for FW readiness, init the command interface, enable
 * the HCA, negotiate ISSI, supply boot/init pages, set control/caps,
 * INIT_HCA, and start health polling. The step order follows the FW
 * initialization handshake and must not be rearranged.
 *
 * @boot: forwarded to mlx5_stop_health_poll() on the error path
 *        (true on first-time init — see callers).
 *
 * Returns 0 on success or a negative errno; on failure everything done
 * so far is unwound via the goto ladder.
 */
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
			      FW_PRE_INIT_TIMEOUT_MILI);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	/* second wait: FW must leave the initializing state before commands */
	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
			      FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	/* boot pages (arg 1 = boot stage) must precede set_hca_ctrl/cap */
	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	/* init pages (arg 0 = init stage) after capabilities are set */
	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto stop_health;
	}

	return 0;

stop_health:
	mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return err;
}
1092
/* Reverse of mlx5_function_setup(): stop health polling, tear down the
 * HCA and release its pages and the command interface.
 *
 * If TEARDOWN_HCA fails, cleanup is skipped entirely — presumably because
 * FW may still own the startup pages at that point; confirm against FW
 * spec before changing.
 *
 * Returns 0 on success or the teardown command's error.
 */
static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return 0;
}
1110
/* Runtime bring-up of the device: UAR, events, page allocator, IRQ/EQ
 * tables, tracers, accelerators, flow steering, SF/EC and SR-IOV.
 * Executed on every load (initial probe and recovery/resume), as opposed
 * to the once-per-lifetime mlx5_init_once().
 *
 * Returns 0 on success; on failure the goto ladder unwinds in reverse
 * order. Counterpart of mlx5_unload().
 */
static int mlx5_load(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		return err;
	}

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_irq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc IRQs\n");
		goto err_irq_table;
	}

	/* EQs depend on the IRQ table created above */
	err = mlx5_eq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create EQs\n");
		goto err_eq_table;
	}

	err = mlx5_fw_tracer_init(dev->tracer);
	if (err) {
		mlx5_core_err(dev, "Failed to init FW tracer\n");
		goto err_fw_tracer;
	}

	mlx5_fw_reset_events_start(dev);
	mlx5_hv_vhca_init(dev->hv_vhca);

	err = mlx5_rsc_dump_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init Resource dump\n");
		goto err_rsc_dump;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;
	}

	mlx5_accel_ipsec_init(dev);

	err = mlx5_accel_tls_init(dev);
	if (err) {
		mlx5_core_err(dev, "TLS device start failed %d\n", err);
		goto err_tls_start;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
		goto err_fs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set hca defaults\n");
		goto err_set_hca;
	}

	mlx5_vhca_event_start(dev);

	err = mlx5_sf_hw_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "sf table create failed %d\n", err);
		goto err_vhca;
	}

	err = mlx5_ec_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init embedded CPU\n");
		goto err_ec;
	}

	mlx5_lag_add_mdev(dev);
	err = mlx5_sriov_attach(dev);
	if (err) {
		mlx5_core_err(dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	mlx5_sf_dev_table_create(dev);

	return 0;

err_sriov:
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
err_ec:
	mlx5_sf_hw_table_destroy(dev);
err_vhca:
	mlx5_vhca_event_stop(dev);
err_set_hca:
	mlx5_cleanup_fs(dev);
err_fs:
	mlx5_accel_tls_cleanup(dev);
err_tls_start:
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_fpga_device_stop(dev);
err_fpga_start:
	mlx5_rsc_dump_cleanup(dev);
err_rsc_dump:
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
	mlx5_eq_table_destroy(dev);
err_eq_table:
	mlx5_irq_table_destroy(dev);
err_irq_table:
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
	return err;
}
1233
/* Runtime teardown: undo mlx5_load() in reverse order. */
static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_sf_dev_table_destroy(dev);
	mlx5_sriov_detach(dev);
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
	mlx5_sf_hw_table_destroy(dev);
	mlx5_vhca_event_stop(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_irq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}
1256
/* Full first-time initialization: function setup, once-per-lifetime SW
 * objects, runtime load, then devlink and device registration. Serialized
 * against load/unload by intf_state_mutex; a NOP if the interface is
 * already up.
 *
 * Returns 0 on success or a negative errno; on failure the device state
 * is marked INTERNAL_ERROR.
 */
int mlx5_init_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, true);
	if (err)
		goto err_function;

	err = mlx5_init_once(dev);
	if (err) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto function_teardown;
	}

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
	if (err)
		goto err_devlink_reg;

	err = mlx5_register_device(dev);
	if (err)
		goto err_register;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_register:
	mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_cleanup_once(dev);
function_teardown:
	mlx5_function_teardown(dev, true);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
1311
/* Full teardown, reverse of mlx5_init_one(). Unregisters the device and
 * devlink, then unloads and tears down the function. If the interface is
 * already down, only the once-per-lifetime cleanup is performed.
 */
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	mlx5_unregister_device(dev);
	mlx5_devlink_unregister(priv_to_devlink(dev));

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		/* still release the once-per-lifetime SW objects */
		mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_cleanup_once(dev);
	mlx5_function_teardown(dev, true);
out:
	mutex_unlock(&dev->intf_state_mutex);
}
1333
/* Reload path (recovery/resume): like mlx5_init_one() but without the
 * once-per-lifetime SW object init, and attaches rather than registers
 * the device. NOP if the interface is already up.
 *
 * Returns 0 on success or a negative errno; on failure the device state
 * is marked INTERNAL_ERROR.
 */
int mlx5_load_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, false);
	if (err)
		goto err_function;

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_attach_device(dev);
	if (err)
		goto err_attach;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_attach:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_function_teardown(dev, false);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
1374
/* Reverse of mlx5_load_one(): detach the device, then unload and tear
 * down the function. NOP (beyond the detach) if already down.
 */
void mlx5_unload_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	mlx5_detach_device(dev);

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_function_teardown(dev, false);
out:
	mutex_unlock(&dev->intf_state_mutex);
}
1393
/* Basic mlx5_core_dev construction: select the profile, initialize locks,
 * lists and debugfs, then health, page allocator and auxiliary-device
 * machinery. No HW access yet. Counterpart of mlx5_mdev_uninit().
 *
 * @profile_idx: index into the module-level profile[] array.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * initialized so far is destroyed in reverse order.
 */
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->intf_state_mutex);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);

	priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
					    mlx5_debugfs_root);
	INIT_LIST_HEAD(&priv->traps);

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	err = mlx5_adev_init(dev);
	if (err)
		goto err_adev_init;

	return 0;

err_adev_init:
	mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	/* NOTE(review): error path uses debugfs_remove() while
	 * mlx5_mdev_uninit() uses debugfs_remove_recursive() — confirm
	 * whether the asymmetry is intentional
	 */
	debugfs_remove(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	return err;
}
1444
/* Destroy everything built by mlx5_mdev_init(), in reverse order. */
void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_adev_cleanup(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
}
1459
/* PCI probe callback. Allocates the devlink instance (whose private data
 * is the mlx5_core_dev), initializes the mdev and PCI layers, and runs
 * full device init. crdump-enable failure is non-fatal.
 *
 * Returns 0 on success or a negative errno with everything unwound.
 */
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	int err;

	devlink = mlx5_devlink_alloc();
	if (!devlink) {
		dev_err(&pdev->dev, "devlink alloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	dev->device = &pdev->dev;
	dev->pdev = pdev;

	/* PF vs VF is encoded in the PCI device table's driver_data */
	dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;

	dev->priv.adev_idx = mlx5_adev_idx_alloc();
	if (dev->priv.adev_idx < 0) {
		err = dev->priv.adev_idx;
		goto adev_init_err;
	}

	err = mlx5_mdev_init(dev, prof_sel);
	if (err)
		goto mdev_init_err;

	err = mlx5_pci_init(dev, pdev, id);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
			      err);
		goto pci_init_err;
	}

	err = mlx5_init_one(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
			      err);
		goto err_init_one;
	}

	/* best-effort: crash-dump support is optional */
	err = mlx5_crdump_enable(dev);
	if (err)
		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

	pci_save_state(pdev);
	if (!mlx5_core_is_mp_slave(dev))
		devlink_reload_enable(devlink);
	return 0;

err_init_one:
	mlx5_pci_close(dev);
pci_init_err:
	mlx5_mdev_uninit(dev);
mdev_init_err:
	mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
	mlx5_devlink_free(devlink);

	return err;
}
1523
/* PCI remove callback: undo probe_one() in reverse order. The health WQ
 * is drained before uninit so no health work races the teardown.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	devlink_reload_disable(devlink);
	mlx5_crdump_disable(dev);
	mlx5_drain_health_wq(dev);
	mlx5_uninit_one(dev);
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	mlx5_adev_idx_free(dev->priv.adev_idx);
	mlx5_devlink_free(devlink);
}
1538
/* PCI AER: a channel error was detected. Enter the error state, reset the
 * SW state, unload the device and disable PCI. Requests a slot reset
 * unless the failure is permanent.
 */
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_core_info(dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev, false);
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
	mlx5_drain_health_wq(dev);
	mlx5_pci_disable_device(dev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
1555
1556/* wait for the device to show vital signs by waiting
1557 * for the health counter to start counting.
1558 */
1559static int wait_vital(struct pci_dev *pdev)
1560{
1561        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1562        struct mlx5_core_health *health = &dev->priv.health;
1563        const int niter = 100;
1564        u32 last_count = 0;
1565        u32 count;
1566        int i;
1567
1568        for (i = 0; i < niter; i++) {
1569                count = ioread32be(health->health_counter);
1570                if (count && count != 0xffffffff) {
1571                        if (last_count && last_count != count) {
1572                                mlx5_core_info(dev,
1573                                               "wait vital counter value 0x%x after %d iterations\n",
1574                                               count, i);
1575                                return 0;
1576                        }
1577                        last_count = count;
1578                }
1579                msleep(50);
1580        }
1581
1582        return -ETIMEDOUT;
1583}
1584
/* PCI AER: slot has been reset. Re-enable the device, restore PCI config
 * space and wait for FW vital signs before declaring recovery.
 */
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			      __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* restore the config space saved at probe, then re-save it for a
	 * possible future reset
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (wait_vital(pdev)) {
		mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
1610
/* PCI AER: traffic may resume. Reload the device and report the outcome. */
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev);
	if (!err) {
		mlx5_core_info(dev, "%s: device recovered\n", __func__);
		return;
	}

	mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
		      __func__, err);
}
1625
/* PCI Advanced Error Recovery (AER) callbacks */
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset     = mlx5_pci_slot_reset,
	.resume         = mlx5_pci_resume
};
1631
/* Attempt an expedited teardown for shutdown/kexec, using the FW
 * fast-teardown (preferred) or force-teardown command instead of the full
 * unload sequence.
 *
 * Returns 0 on success, -EOPNOTSUPP if FW supports neither mechanism,
 * -EAGAIN if the device is in internal error, or the teardown command's
 * error (health polling is restarted in that case).
 */
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret = 1;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms requiring freeing the IRQ's in the shutdown
	 * flow. If they aren't freed they can't be allocated after
	 * kexec. There is no need to cleanup the mlx5_core software
	 * contexts.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}
1681
/* PCI shutdown callback: prefer the FW fast-unload path; fall back to a
 * full unload if it is unsupported or fails, then disable the device.
 */
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "Shutdown was called\n");
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev);
	mlx5_pci_disable_device(dev);
}
1693
1694static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
1695{
1696        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1697
1698        mlx5_unload_one(dev);
1699
1700        return 0;
1701}
1702
/* Legacy PM resume callback: reload the device. */
static int mlx5_resume(struct pci_dev *pdev)
{
	return mlx5_load_one(pci_get_drvdata(pdev));
}
1709
/* PCI IDs served by this driver; entries carrying MLX5_PCI_DEV_IS_VF in
 * driver_data are probed as virtual functions (see probe_one()).
 */
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },			/* BlueField-3 integrated ConnectX-7 network controller */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
1735
/* Force the device down: SW error reset followed by a full unload. */
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
}
1741
/* Recover a device after a fatal error: for PCI functions (not SFs),
 * cycle the PCI device and reset the slot, then reload.
 *
 * Returns 0 on success, -EIO if the slot reset did not recover, or the
 * error from mlx5_load_one().
 */
int mlx5_recover_device(struct mlx5_core_dev *dev)
{
	/* SFs have no PCI function of their own to reset */
	if (!mlx5_core_is_sf(dev)) {
		mlx5_pci_disable_device(dev);
		if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
			return -EIO;
	}

	return mlx5_load_one(dev);
}
1752
/* PCI driver registration: probe/remove, legacy PM, shutdown, AER and
 * SR-IOV callbacks.
 */
static struct pci_driver mlx5_core_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = probe_one,
	.remove         = remove_one,
	.suspend        = mlx5_suspend,
	.resume         = mlx5_resume,
	.shutdown	= shutdown,
	.err_handler    = &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
	.sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};
1766
1767static void mlx5_core_verify_params(void)
1768{
1769        if (prof_sel >= ARRAY_SIZE(profile)) {
1770                pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
1771                        prof_sel,
1772                        ARRAY_SIZE(profile) - 1,
1773                        MLX5_DEFAULT_PROF);
1774                prof_sel = MLX5_DEFAULT_PROF;
1775        }
1776}
1777
/* Module entry point: seed the SW owner id, validate params, register
 * debugfs, then the PCI, SF and ethernet (mlx5e) drivers. Failures unwind
 * via the goto ladder.
 */
static int __init init(void)
{
	int err;

	WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
		  "mlx5_core name not in sync with kernel module name");

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_fpga_ipsec_build_fs_cmds();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_sf_driver_register();
	if (err)
		goto err_sf;

	err = mlx5e_init();
	if (err)
		goto err_en;

	return 0;

err_en:
	mlx5_sf_driver_unregister();
err_sf:
	pci_unregister_driver(&mlx5_core_driver);
err_debug:
	mlx5_unregister_debugfs();
	return err;
}
1813
/* Module exit point: unregister in reverse order of init(). */
static void __exit cleanup(void)
{
	mlx5e_cleanup();
	mlx5_sf_driver_unregister();
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}
1821
/* module entry/exit registration */
module_init(init);
module_exit(cleanup);
1824