linux/drivers/net/ethernet/intel/ice/ice_devlink.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"

/* context for devlink info version reporting */
struct ice_info_ctx {
        char buf[128];
        struct ice_orom_info pending_orom;
        struct ice_nvm_info pending_nvm;
        struct ice_netlist_info pending_netlist;
        struct ice_hw_dev_caps dev_caps;
};

/* The following functions are used to format specific strings for various
 * devlink info versions. The ctx parameter is used to provide the storage
 * buffer, as well as any ancillary information calculated when the info
 * request was made.
 *
 * If a version does not exist, for example when attempting to get the
 * inactive version of flash when there is no pending update, the function
 * should leave the buffer in the ctx structure empty.
 */

static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        u8 dsn[8];

        /* Copy the DSN into an array in Big Endian format */
        put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

        snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}

static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_hw *hw = &pf->hw;
        int status;

        status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
        if (status)
                /* We failed to locate the PBA, so just skip this entry */
                dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
                        status);
}

static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
                 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}

static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u",
                 hw->api_maj_ver, hw->api_min_ver);
}

static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
}

static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_orom_info *orom = &pf->hw.flash.orom;

        snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
                 orom->major, orom->build, orom->patch);
}

static void
ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
                          struct ice_info_ctx *ctx)
{
        struct ice_orom_info *orom = &ctx->pending_orom;

        if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
                snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
                         orom->major, orom->build, orom->patch);
}

static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

        snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
}

static void
ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
                         struct ice_info_ctx *ctx)
{
        struct ice_nvm_info *nvm = &ctx->pending_nvm;

        if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
                snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
                         nvm->major, nvm->minor);
}

static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

        snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_nvm_info *nvm = &ctx->pending_nvm;

        if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
                snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
}

static void
ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

        snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
                 pkg->major, pkg->minor, pkg->update, pkg->draft);
}

static void
ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
}

static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

        /* The netlist version fields are BCD formatted */
        snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
                 netlist->major, netlist->minor,
                 netlist->type >> 16, netlist->type & 0xFFFF,
                 netlist->rev, netlist->cust_ver);
}

static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
        struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

        snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
                             struct ice_info_ctx *ctx)
{
        struct ice_netlist_info *netlist = &ctx->pending_netlist;

        /* The netlist version fields are BCD formatted */
        if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
                snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
                         netlist->major, netlist->minor,
                         netlist->type >> 16, netlist->type & 0xFFFF,
                         netlist->rev, netlist->cust_ver);
}

static void
ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
                               struct ice_info_ctx *ctx)
{
        struct ice_netlist_info *netlist = &ctx->pending_netlist;

        if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
                snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }

/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler. The stored entry will first try the pending handler, and fall back
 * to the active handler if the pending function does not report a version.
 * The pending handler should check the status of a pending update for the
 * relevant flash component. It should only fill in the buffer in the case
 * where a valid pending version is available. This ensures that the related
 * stored and running versions remain in sync, and that stored versions are
 * correctly reported as expected.
 */
#define combined(key, active, pending) \
        running(key, active), \
        stored(key, pending, active)

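/* For example, combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver)
 * expands to the following two table entries:
 *
 *   running("fw.psid.api", ice_info_nvm_ver),
 *   stored("fw.psid.api", ice_info_pending_nvm_ver, ice_info_nvm_ver)
 */
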
enum ice_version_type {
        ICE_VERSION_FIXED,
        ICE_VERSION_RUNNING,
        ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
        enum ice_version_type type;
        const char *key;
        void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
        void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
        fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
        running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
        running("fw.mgmt.api", ice_info_fw_api),
        running("fw.mgmt.build", ice_info_fw_build),
        combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
        combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
        combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
        running("fw.app.name", ice_info_ddp_pkg_name),
        running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
        running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
        combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
        combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
};

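/* Illustrative query of the table above from userspace (iproute2; the PCI
 * bus address is a placeholder and the reported values depend on the
 * installed firmware):
 *
 *   $ devlink dev info pci/0000:01:00.0
 *
 * The keys are reported in the fixed, running, and stored sections of the
 * response, formatted by the getter callbacks defined earlier in this file.
 */
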
/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
                                struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
{
        struct ice_pf *pf = devlink_priv(devlink);
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        struct ice_info_ctx *ctx;
        size_t i;
        int err;

        err = ice_wait_for_reset(pf, 10 * HZ);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
                return err;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        /* discover capabilities first */
        err = ice_discover_dev_caps(hw, &ctx->dev_caps);
        if (err) {
                dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
                        err, ice_aq_str(hw->adminq.sq_last_status));
                NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
                goto out_free_ctx;
        }

        if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
                err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
                if (err) {
                        dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
                                err, ice_aq_str(hw->adminq.sq_last_status));

                        /* disable display of pending Option ROM */
                        ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
                }
        }

        if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
                err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
                if (err) {
                        dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
                                err, ice_aq_str(hw->adminq.sq_last_status));

                        /* disable display of pending NVM */
                        ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
                }
        }

        if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
                err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
                if (err) {
                        dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
                                err, ice_aq_str(hw->adminq.sq_last_status));

                        /* disable display of pending Netlist */
                        ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
                }
        }

        err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
                goto out_free_ctx;
        }

        ice_info_get_dsn(pf, ctx);

        err = devlink_info_serial_number_put(req, ctx->buf);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
                goto out_free_ctx;
        }

        for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
                enum ice_version_type type = ice_devlink_versions[i].type;
                const char *key = ice_devlink_versions[i].key;

                memset(ctx->buf, 0, sizeof(ctx->buf));

                ice_devlink_versions[i].getter(pf, ctx);

                /* If the default getter doesn't report a version, use the
                 * fallback function. This is primarily useful in the case of
                 * "stored" versions that want to report the same value as the
                 * running version in the normal case of no pending update.
                 */
                if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
                        ice_devlink_versions[i].fallback(pf, ctx);

                /* Do not report missing versions */
                if (ctx->buf[0] == '\0')
                        continue;

                switch (type) {
                case ICE_VERSION_FIXED:
                        err = devlink_info_version_fixed_put(req, key, ctx->buf);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
                                goto out_free_ctx;
                        }
                        break;
                case ICE_VERSION_RUNNING:
                        err = devlink_info_version_running_put(req, key, ctx->buf);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
                                goto out_free_ctx;
                        }
                        break;
                case ICE_VERSION_STORED:
                        err = devlink_info_version_stored_put(req, key, ctx->buf);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
                                goto out_free_ctx;
                        }
                        break;
                }
        }

out_free_ctx:
        kfree(ctx);
        return err;
}

/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 *
 * Allow user to activate new Embedded Management Processor firmware by
 * issuing device specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happens automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 *
 * Return: zero on success or an error code on failure.
 */
static int
ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
                              enum devlink_reload_action action,
                              enum devlink_reload_limit limit,
                              struct netlink_ext_ack *extack)
{
        struct ice_pf *pf = devlink_priv(devlink);
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        u8 pending;
        int err;

        err = ice_get_pending_updates(pf, &pending, extack);
        if (err)
                return err;

        /* pending is a bitmask of which flash banks have a pending update,
         * including the main NVM bank, the Option ROM bank, and the netlist
         * bank. If any of these bits are set, then there is a pending update
         * waiting to be activated.
         */
        if (!pending) {
                NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
                return -ECANCELED;
        }

        if (pf->fw_emp_reset_disabled) {
                NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
                return -ECANCELED;
        }

        dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

        err = ice_aq_nvm_update_empr(hw);
        if (err) {
                dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
                        err, ice_aq_str(hw->adminq.sq_last_status));
                NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
                return err;
        }

        return 0;
}

/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @devlink: pointer to the devlink instance reloading
 * @action: the action requested
 * @limit: limits imposed by userspace, such as not resetting
 * @actions_performed: on return, indicates which actions were actually performed
 * @extack: netlink extended ACK structure
 *
 * Wait for the driver to finish rebuilding after the EMP reset is completed.
 * This includes time to wait for both the actual device reset as well as the
 * time for the driver's rebuild to complete.
 *
 * Return: zero on success or an error code on failure.
 */
static int
ice_devlink_reload_empr_finish(struct devlink *devlink,
                               enum devlink_reload_action action,
                               enum devlink_reload_limit limit,
                               u32 *actions_performed,
                               struct netlink_ext_ack *extack)
{
        struct ice_pf *pf = devlink_priv(devlink);
        int err;

        *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);

        err = ice_wait_for_reset(pf, 60 * HZ);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
                return err;
        }

        return 0;
}

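/* Firmware activation through the reload handlers above is typically
 * requested from userspace with iproute2, for example (illustrative only;
 * the PCI bus address is a placeholder):
 *
 *   devlink dev reload pci/0000:01:00.0 action fw_activate
 */
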
static const struct devlink_ops ice_devlink_ops = {
        .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
        .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
        /* The ice driver currently does not support driver reinit */
        .reload_down = ice_devlink_reload_empr_start,
        .reload_up = ice_devlink_reload_empr_finish,
        .eswitch_mode_get = ice_eswitch_mode_get,
        .eswitch_mode_set = ice_eswitch_mode_set,
        .info_get = ice_devlink_info_get,
        .flash_update = ice_devlink_flash_update,
};

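/**
 * ice_devlink_enable_roce_get - Get the RoCEv2 parameter value
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID
 * @ctx: context to store the parameter value
 *
 * Reports whether RoCEv2 is currently enabled in the PF's RDMA mode.
 *
 * Return: zero on success.
 */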
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
                            struct devlink_param_gset_ctx *ctx)
{
        struct ice_pf *pf = devlink_priv(devlink);

        ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;

        return 0;
}

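/**
 * ice_devlink_enable_roce_set - Enable or disable RoCEv2 support
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID
 * @ctx: context with the requested parameter value
 *
 * Unplugs the RDMA auxiliary device when disabling, or plugs it back in when
 * enabling, updating pf->rdma_mode accordingly.
 *
 * Return: zero on success or an error code on failure.
 */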
static int
ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
                            struct devlink_param_gset_ctx *ctx)
{
        struct ice_pf *pf = devlink_priv(devlink);
        bool roce_ena = ctx->val.vbool;
        int ret;

        if (!roce_ena) {
                ice_unplug_aux_dev(pf);
                pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
                return 0;
        }

        pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
        ret = ice_plug_aux_dev(pf);
        if (ret)
                pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;

        return ret;
}

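/**
 * ice_devlink_enable_roce_validate - Validate a RoCEv2 parameter request
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID
 * @val: the requested parameter value
 * @extack: netlink extended ACK structure
 *
 * Rejects the request if RDMA is not supported on this PF or if iWARP is
 * already enabled, since both protocols cannot be active at the same time.
 *
 * Return: zero on success or an error code on failure.
 */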
static int
ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
                                 union devlink_param_value val,
                                 struct netlink_ext_ack *extack)
{
        struct ice_pf *pf = devlink_priv(devlink);

        if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
                return -EOPNOTSUPP;

        if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
                NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
                return -EOPNOTSUPP;
        }

        return 0;
}

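/**
 * ice_devlink_enable_iw_get - Get the iWARP parameter value
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID
 * @ctx: context to store the parameter value
 *
 * Reports whether iWARP is currently enabled in the PF's RDMA mode.
 *
 * Return: zero on success.
 */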
static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
                          struct devlink_param_gset_ctx *ctx)
{
        struct ice_pf *pf = devlink_priv(devlink);

        ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;

        return 0;
}

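/**
 * ice_devlink_enable_iw_set - Enable or disable iWARP support
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID
 * @ctx: context with the requested parameter value
 *
 * Unplugs the RDMA auxiliary device when disabling, or plugs it back in when
 * enabling, updating pf->rdma_mode accordingly.
 *
 * Return: zero on success or an error code on failure.
 */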
static int
ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
                          struct devlink_param_gset_ctx *ctx)
{
        struct ice_pf *pf = devlink_priv(devlink);
        bool iw_ena = ctx->val.vbool;
        int ret;

        if (!iw_ena) {
                ice_unplug_aux_dev(pf);
                pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
                return 0;
        }

        pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
        ret = ice_plug_aux_dev(pf);
        if (ret)
                pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;

        return ret;
}

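/**
 * ice_devlink_enable_iw_validate - Validate an iWARP parameter request
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID
 * @val: the requested parameter value
 * @extack: netlink extended ACK structure
 *
 * Rejects the request if RDMA is not supported on this PF or if RoCEv2 is
 * already enabled, since both protocols cannot be active at the same time.
 *
 * Return: zero on success or an error code on failure.
 */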
static int
ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
                               union devlink_param_value val,
                               struct netlink_ext_ack *extack)
{
        struct ice_pf *pf = devlink_priv(devlink);

        if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
                return -EOPNOTSUPP;

        if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
                NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
                return -EOPNOTSUPP;
        }

        return 0;
}

static const struct devlink_param ice_devlink_params[] = {
        DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                              ice_devlink_enable_roce_get,
                              ice_devlink_enable_roce_set,
                              ice_devlink_enable_roce_validate),
        DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                              ice_devlink_enable_iw_get,
                              ice_devlink_enable_iw_set,
                              ice_devlink_enable_iw_validate),
};

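/* The runtime parameters above are typically toggled from userspace with
 * iproute2, for example (illustrative only; the PCI bus address is a
 * placeholder):
 *
 *   devlink dev param set pci/0000:01:00.0 name enable_roce \
 *           value true cmode runtime
 */
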
static void ice_devlink_free(void *devlink_ptr)
{
        devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is tracked through devres by adding an
 * action to remove it when unwinding.
 *
 * Return: pointer to the allocated PF structure or NULL on failure.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
        struct devlink *devlink;

        devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
        if (!devlink)
                return NULL;

        /* Add an action to tear down the devlink when unwinding the driver */
        if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
                return NULL;

        return devlink_priv(devlink);
}

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for
 *
 * Register the devlink instance associated with this physical function and
 * enable devlink reload support.
 */
void ice_devlink_register(struct ice_pf *pf)
{
        struct devlink *devlink = priv_to_devlink(pf);
        struct device *dev = ice_pf_to_dev(pf);

        devlink_register(devlink, dev);
        devlink_reload_enable(devlink);
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
        struct devlink *devlink = priv_to_devlink(pf);

        devlink_reload_disable(devlink);
        devlink_unregister(devlink);
}

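/**
 * ice_devlink_register_params - Register the devlink parameters for this PF
 * @pf: the PF structure to register parameters for
 *
 * Registers the enable_roce and enable_iwarp parameters, sets their initial
 * values, and publishes them to userspace.
 *
 * Return: zero on success or an error code on failure.
 */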
int ice_devlink_register_params(struct ice_pf *pf)
{
        struct devlink *devlink = priv_to_devlink(pf);
        union devlink_param_value value;
        int err;

        err = devlink_params_register(devlink, ice_devlink_params,
                                      ARRAY_SIZE(ice_devlink_params));
        if (err)
                return err;

        value.vbool = false;
        devlink_param_driverinit_value_set(devlink,
                                           DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
                                           value);

        value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false;
        devlink_param_driverinit_value_set(devlink,
                                           DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
                                           value);

        devlink_params_publish(devlink);

        return 0;
}

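/**
 * ice_devlink_unregister_params - Unregister the devlink parameters
 * @pf: the PF structure to cleanup
 *
 * Unpublishes and unregisters the parameters added by
 * ice_devlink_register_params().
 */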
void ice_devlink_unregister_params(struct ice_pf *pf)
{
        devlink_params_unpublish(priv_to_devlink(pf));

        devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
                                  ARRAY_SIZE(ice_devlink_params));
}

/**
 * ice_devlink_create_pf_port - Create a devlink port for this PF
 * @pf: the PF to create a devlink port for
 *
 * Create and register a devlink_port for this PF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_pf_port(struct ice_pf *pf)
{
        struct devlink_port_attrs attrs = {};
        struct devlink_port *devlink_port;
        struct devlink *devlink;
        struct ice_vsi *vsi;
        struct device *dev;
        int err;

        dev = ice_pf_to_dev(pf);

        devlink_port = &pf->devlink_port;

        vsi = ice_get_main_vsi(pf);
        if (!vsi)
                return -EIO;

        attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
        attrs.phys.port_number = pf->hw.bus.func;
        devlink_port_attrs_set(devlink_port, &attrs);
        devlink = priv_to_devlink(pf);

        err = devlink_port_register(devlink, devlink_port, vsi->idx);
        if (err) {
                dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
                        pf->hw.pf_id, err);
                return err;
        }

        return 0;
}

/**
 * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
 * @pf: the PF to cleanup
 *
 * Unregisters the devlink_port structure associated with this PF.
 */
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
        struct devlink_port *devlink_port;

        devlink_port = &pf->devlink_port;

        devlink_port_type_clear(devlink_port);
        devlink_port_unregister(devlink_port);
}

/**
 * ice_devlink_create_vf_port - Create a devlink port for this VF
 * @vf: the VF to create a port for
 *
 * Create and register a devlink_port for this VF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_vf_port(struct ice_vf *vf)
{
        struct devlink_port_attrs attrs = {};
        struct devlink_port *devlink_port;
        struct devlink *devlink;
        struct ice_vsi *vsi;
        struct device *dev;
        struct ice_pf *pf;
        int err;

        pf = vf->pf;
        dev = ice_pf_to_dev(pf);
        vsi = ice_get_vf_vsi(vf);
        devlink_port = &vf->devlink_port;

        attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
        attrs.pci_vf.pf = pf->hw.bus.func;
        attrs.pci_vf.vf = vf->vf_id;

        devlink_port_attrs_set(devlink_port, &attrs);
        devlink = priv_to_devlink(pf);

        err = devlink_port_register(devlink, devlink_port, vsi->idx);
        if (err) {
                dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
                        vf->vf_id, err);
                return err;
        }

        return 0;
}

/**
 * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
 * @vf: the VF to cleanup
 *
 * Unregisters the devlink_port structure associated with this VF.
 */
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
        struct devlink_port *devlink_port;

        devlink_port = &vf->devlink_port;

        devlink_port_type_clear(devlink_port);
        devlink_port_unregister(devlink_port);
}

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the nvm-flash devlink region. It captures a snapshot of the full NVM flash
 * contents, including both banks of flash. This snapshot can later be viewed
 * via the devlink-region interface.
 *
 * It captures the flash using the FLASH_ONLY bit set when reading via
 * firmware, so it does not read the current Shadow RAM contents. For that,
 * use the shadow-ram region.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
                                    const struct devlink_region_ops *ops,
                                    struct netlink_ext_ack *extack, u8 **data)
{
        struct ice_pf *pf = devlink_priv(devlink);
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        void *nvm_data;
        u32 nvm_size;
        int status;

        nvm_size = hw->flash.flash_size;
        nvm_data = vzalloc(nvm_size);
        if (!nvm_data)
                return -ENOMEM;

        status = ice_acquire_nvm(hw, ICE_RES_READ);
        if (status) {
                dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
                        status, hw->adminq.sq_last_status);
                NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
                vfree(nvm_data);
                return status;
        }

        status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
        if (status) {
                dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
                        nvm_size, status, hw->adminq.sq_last_status);
                NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
                ice_release_nvm(hw);
                vfree(nvm_data);
                return status;
        }

        ice_release_nvm(hw);

        *data = nvm_data;

        return 0;
}

/**
 * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the shadow-ram devlink region. It captures a snapshot of the shadow ram
 * contents. This snapshot can later be viewed via the devlink-region
 * interface.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int
ice_devlink_sram_snapshot(struct devlink *devlink,
                          const struct devlink_region_ops __always_unused *ops,
                          struct netlink_ext_ack *extack, u8 **data)
{
        struct ice_pf *pf = devlink_priv(devlink);
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        u8 *sram_data;
        u32 sram_size;
        int err;

        sram_size = hw->flash.sr_words * 2u;
        sram_data = vzalloc(sram_size);
        if (!sram_data)
                return -ENOMEM;

        err = ice_acquire_nvm(hw, ICE_RES_READ);
        if (err) {
                dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
                        err, hw->adminq.sq_last_status);
                NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
                vfree(sram_data);
                return err;
        }

        /* Read from the Shadow RAM, rather than directly from NVM */
        err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true);
        if (err) {
                dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
                        sram_size, err, hw->adminq.sq_last_status);
                NL_SET_ERR_MSG_MOD(extack,
                                   "Failed to read Shadow RAM contents");
                ice_release_nvm(hw);
                vfree(sram_data);
                return err;
        }

        ice_release_nvm(hw);

        *data = sram_data;

        return 0;
}

/**
 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the device-caps devlink region. It captures a snapshot of the device
 * capabilities reported by firmware.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
                             const struct devlink_region_ops *ops,
                             struct netlink_ext_ack *extack, u8 **data)
{
        struct ice_pf *pf = devlink_priv(devlink);
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        void *devcaps;
        int status;

        devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
        if (!devcaps)
                return -ENOMEM;

        status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
                                  ice_aqc_opc_list_dev_caps, NULL);
        if (status) {
                dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
                        status, hw->adminq.sq_last_status);
                NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
                vfree(devcaps);
                return status;
        }

        *data = (u8 *)devcaps;

        return 0;
}

static const struct devlink_region_ops ice_nvm_region_ops = {
        .name = "nvm-flash",
        .destructor = vfree,
        .snapshot = ice_devlink_nvm_snapshot,
};

static const struct devlink_region_ops ice_sram_region_ops = {
        .name = "shadow-ram",
        .destructor = vfree,
        .snapshot = ice_devlink_sram_snapshot,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
        .name = "device-caps",
        .destructor = vfree,
        .snapshot = ice_devlink_devcaps_snapshot,
};

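/* Snapshots of the regions above are usually captured on demand from
 * userspace, for example (illustrative iproute2 usage; the PCI bus address
 * and snapshot ID are placeholders):
 *
 *   devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
 *   devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
 */
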
/**
 * ice_devlink_init_regions - Initialize devlink regions
 * @pf: the PF device structure
 *
 * Create devlink regions used to enable access to dump the contents of the
 * flash memory on the device.
 */
void ice_devlink_init_regions(struct ice_pf *pf)
{
        struct devlink *devlink = priv_to_devlink(pf);
        struct device *dev = ice_pf_to_dev(pf);
        u64 nvm_size, sram_size;

        nvm_size = pf->hw.flash.flash_size;
        pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
                                               nvm_size);
        if (IS_ERR(pf->nvm_region)) {
                dev_err(dev, "failed to create NVM devlink region, err %ld\n",
                        PTR_ERR(pf->nvm_region));
                pf->nvm_region = NULL;
        }

        sram_size = pf->hw.flash.sr_words * 2u;
        pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
                                                1, sram_size);
        if (IS_ERR(pf->sram_region)) {
                dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
                        PTR_ERR(pf->sram_region));
                pf->sram_region = NULL;
        }

        pf->devcaps_region = devlink_region_create(devlink,
                                                   &ice_devcaps_region_ops, 10,
                                                   ICE_AQ_MAX_BUF_LEN);
        if (IS_ERR(pf->devcaps_region)) {
                dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
                        PTR_ERR(pf->devcaps_region));
                pf->devcaps_region = NULL;
        }
}

/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
        if (pf->nvm_region)
                devlink_region_destroy(pf->nvm_region);

        if (pf->sram_region)
                devlink_region_destroy(pf->sram_region);

        if (pf->devcaps_region)
                devlink_region_destroy(pf->devcaps_region);
}