   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
   3
   4/*
   5 * nfp_net_main.c
   6 * Netronome network device driver: Main entry point
   7 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
   8 *          Alejandro Lucero <alejandro.lucero@netronome.com>
   9 *          Jason McMullan <jason.mcmullan@netronome.com>
  10 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
  11 */
  12
  13#include <linux/etherdevice.h>
  14#include <linux/kernel.h>
  15#include <linux/init.h>
  16#include <linux/lockdep.h>
  17#include <linux/pci.h>
  18#include <linux/pci_regs.h>
  19#include <linux/msi.h>
  20#include <linux/random.h>
  21#include <linux/rtnetlink.h>
  22
  23#include "nfpcore/nfp.h"
  24#include "nfpcore/nfp_cpp.h"
  25#include "nfpcore/nfp_nffw.h"
  26#include "nfpcore/nfp_nsp.h"
  27#include "nfpcore/nfp6000_pcie.h"
  28#include "nfp_app.h"
  29#include "nfp_net_ctrl.h"
  30#include "nfp_net_sriov.h"
  31#include "nfp_net.h"
  32#include "nfp_main.h"
  33#include "nfp_port.h"
  34
  35#define NFP_PF_CSR_SLICE_SIZE   (32 * 1024)
  36
  37/**
  38 * nfp_net_get_mac_addr() - Get the MAC address.
  39 * @pf:       NFP PF handle
  40 * @netdev:   net_device to set MAC address on
  41 * @port:     NFP port structure
  42 *
  43 * First try to get the MAC address from NSP ETH table. If that
  44 * fails generate a random address.
  45 */
  46void
  47nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
  48                     struct nfp_port *port)
  49{
  50        struct nfp_eth_table_port *eth_port;
  51
  52        eth_port = __nfp_port_get_eth_port(port);
  53        if (!eth_port) {
  54                eth_hw_addr_random(netdev);
  55                return;
  56        }
  57
  58        ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
  59        ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
  60}
  61
  62static struct nfp_eth_table_port *
  63nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
  64{
  65        int i;
  66
  67        for (i = 0; eth_tbl && i < eth_tbl->count; i++)
  68                if (eth_tbl->ports[i].index == index)
  69                        return &eth_tbl->ports[i];
  70
  71        return NULL;
  72}
  73
  74static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
  75{
  76        return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
  77}
  78
  79static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
  80{
  81        return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
  82                                          NFP_APP_CORE_NIC);
  83}
  84
/* Tear down one vNIC and unlink it from pf->vnics.
 * Counterpart of nfp_net_pf_alloc_vnic(): app per-vNIC state and the
 * port are released before the nfp_net structure itself is freed.
 */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}
  94
  95static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
  96{
  97        struct nfp_net *nn, *next;
  98
  99        list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
 100                if (nfp_net_is_data_vnic(nn))
 101                        nfp_net_pf_free_vnic(pf, nn);
 102}
 103
/**
 * nfp_net_pf_alloc_vnic() - Allocate one vNIC and link it on pf->vnics
 * @pf:		NFP PF handle
 * @needs_netdev: true for data vNICs (netdev + app state), false for
 *		  the control vNIC
 * @ctrl_bar:	mapped control BAR slice for this vNIC
 * @qc_bar:	mapped Queue Controller area
 * @stride:	queue pointer stride for the detected FW ABI
 * @id:		vNIC index handed to the app for port setup
 *
 * Return: new nfp_net on success, ERR_PTR() on failure.
 */
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	/* Queue bases and ring limits are advertised by FW in ctrl BAR */
	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
			   n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
	/* Point this vNIC's TX/RX queue pointers into the QC area */
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	if (needs_netdev) {
		/* Let the app attach its per-vNIC state */
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}
 145
/* Finish init of one vNIC: register its devlink port, run nfp_net_init,
 * add debugfs entries and the app's per-vNIC init.
 * On failure everything done here is unwound in reverse order.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	/* Only vNICs backed by a port get a devlink port */
	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			return err;
	}

	err = nfp_net_init(nn);
	if (err)
		goto err_devlink_port_clean;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	if (nn->port)
		nfp_devlink_port_type_eth_set(nn->port);

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_devlink_port_type_clean;
	}

	return 0;

err_devlink_port_type_clean:
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	return err;
}
 188
/* Allocate one data vNIC per port, each using its own 32K slice of the
 * control BAR. vNICs whose port was marked invalid during app alloc are
 * dropped right away; -ENODEV if none survive.
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		/* Advance to the next vNIC's control BAR slice */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID) {
			nfp_net_pf_free_vnic(pf, nn);
			continue;
		}
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}
 223
/* Undo nfp_net_pf_init_vnic() - teardown mirrors init in reverse order */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
}
 235
/* Allocate MSI-X vectors for all vNICs and distribute them.
 * Requests enough vectors for every vNIC's full complement, but accepts
 * as few as NFP_NET_MIN_VNIC_IRQS per vNIC.
 */
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		/* Fair share of what's left, capped at what this vNIC wants */
		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}
 275
/* Release the MSI-X vectors and the IRQ entry table allocated by
 * nfp_net_pf_alloc_irqs(); vectors are disabled before the table goes.
 */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}
 281
/* Run nfp_net_pf_init_vnic() on every data vNIC, assigning sequential
 * ids. On failure, already-initialized vNICs are cleaned in reverse.
 */
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	/* Walk back over the vNICs that were initialized before the
	 * failing one; the failed vNIC cleaned up after itself.
	 */
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}
 308
/* Allocate and init the app for this PF; when the app needs a control
 * vNIC, also map its control BAR and allocate that vNIC.
 * Return: 0 on success, negative errno with everything unwound on error.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	/* App init/clean run under pf->lock */
	mutex_lock(&pf->lock);
	err = nfp_app_init(pf->app);
	mutex_unlock(&pf->lock);
	if (err)
		goto err_free;

	/* Apps without a ctrl vNIC are done here */
	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	/* ctrl vNIC carries no netdev, hence needs_netdev == false */
	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}
 356
/* Undo nfp_net_pf_app_init(): free the ctrl vNIC if one was allocated,
 * then clean and free the app itself.
 */
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);

	nfp_app_free(pf->app);
	pf->app = NULL;
}
 371
/* Init and open the control vNIC, if the app uses one.
 * Success (no-op) when there is no ctrl vNIC.
 */
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}
 392
/* Close and clean the control vNIC; no-op when the app has none */
static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
	if (!pf->ctrl_vnic)
		return;
	nfp_ctrl_close(pf->ctrl_vnic);
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}
 400
/* Start the app: ctrl vNIC first, then the app itself, then re-enable
 * SR-IOV if VFs are already configured. Unwound in reverse on failure.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}
 427
/* Stop the app - exact reverse of nfp_net_pf_app_start() */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}
 435
/* Release all CPP areas mapped by nfp_net_pci_map_mem().
 * The first three symbols are optional and may never have been mapped;
 * the QC and data vNIC BARs always are.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
 447
/* Map all memory areas the PF driver needs: the mandatory data vNIC
 * control BAR and Queue Controller area, plus three optional symbols
 * (MAC stats, VF config, VF config table 2) whose absence (-ENOENT)
 * is tolerated by leaving the corresponding pointer NULL.
 */
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 min_size, cpp_id;
	u8 __iomem *mem;
	int err;

	/* One control BAR slice per data vNIC - mandatory */
	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	/* MAC stats - optional, only relevant with an NSP ETH table */
	if (pf->eth_tbl) {
		min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			pf->mac_stats_mem = NULL;
		}
	}

	/* VF config BAR - optional */
	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	/* VF config table 2 (per-VF entries + mailbox) - optional */
	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	/* Queue Controller area - mandatory */
	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
			       NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}
 523
/* Refresh @port's cached NSP ETH-table entry from a fresh @eth_table.
 * Must run under RTNL. A port missing from the new table is re-flagged
 * as changed and -EIO is returned; a port whose override config changed
 * is marked invalid so the caller tears it down.
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}
 548
/* Re-read the NSP ETH table and synchronize all port state with it,
 * removing vNICs whose ports became invalid. Caller holds pf->lock;
 * RTNL is taken internally around the port-state update.
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	lockdep_assert_held(&pf->lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Read failed - re-flag eth ports so a later refresh
		 * will try again
		 */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}
 600
 601static void nfp_net_refresh_vnics(struct work_struct *work)
 602{
 603        struct nfp_pf *pf = container_of(work, struct nfp_pf,
 604                                         port_refresh_work);
 605
 606        mutex_lock(&pf->lock);
 607        nfp_net_refresh_port_table_sync(pf);
 608        mutex_unlock(&pf->lock);
 609}
 610
 611void nfp_net_refresh_port_table(struct nfp_port *port)
 612{
 613        struct nfp_pf *pf = port->app->pf;
 614
 615        set_bit(NFP_PORT_CHANGED, &port->flags);
 616
 617        queue_work(pf->wq, &pf->port_refresh_work);
 618}
 619
/* Synchronously re-read the NSP ETH table and refresh @port's cached
 * entry. Clears NFP_PORT_CHANGED up front; sets it again if the table
 * cannot be read.
 */
int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}
 641
 642/*
 643 * PCI device functions
 644 */
 645int nfp_net_pci_probe(struct nfp_pf *pf)
 646{
 647        struct devlink *devlink = priv_to_devlink(pf);
 648        struct nfp_net_fw_version fw_ver;
 649        u8 __iomem *ctrl_bar, *qc_bar;
 650        int stride;
 651        int err;
 652
 653        INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
 654
 655        if (!pf->rtbl) {
 656                nfp_err(pf->cpp, "No %s, giving up.\n",
 657                        pf->fw_loaded ? "symbol table" : "firmware found");
 658                return -EINVAL;
 659        }
 660
 661        pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
 662        if ((int)pf->max_data_vnics < 0)
 663                return pf->max_data_vnics;
 664
 665        err = nfp_net_pci_map_mem(pf);
 666        if (err)
 667                return err;
 668
 669        ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
 670        qc_bar = nfp_cpp_area_iomem(pf->qc_area);
 671        if (!ctrl_bar || !qc_bar) {
 672                err = -EIO;
 673                goto err_unmap;
 674        }
 675
 676        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
 677        if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
 678                nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
 679                        fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
 680                err = -EINVAL;
 681                goto err_unmap;
 682        }
 683
 684        /* Determine stride */
 685        if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
 686                stride = 2;
 687                nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
 688        } else {
 689                switch (fw_ver.major) {
 690                case 1 ... 5:
 691                        stride = 4;
 692                        break;
 693                default:
 694                        nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
 695                                fw_ver.resv, fw_ver.class,
 696                                fw_ver.major, fw_ver.minor);
 697                        err = -EINVAL;
 698                        goto err_unmap;
 699                }
 700        }
 701
 702        err = nfp_net_pf_app_init(pf, qc_bar, stride);
 703        if (err)
 704                goto err_unmap;
 705
 706        err = devlink_register(devlink, &pf->pdev->dev);
 707        if (err)
 708                goto err_app_clean;
 709
 710        err = nfp_shared_buf_register(pf);
 711        if (err)
 712                goto err_devlink_unreg;
 713
 714        mutex_lock(&pf->lock);
 715        pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
 716
 717        /* Allocate the vnics and do basic init */
 718        err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
 719        if (err)
 720                goto err_clean_ddir;
 721
 722        err = nfp_net_pf_alloc_irqs(pf);
 723        if (err)
 724                goto err_free_vnics;
 725
 726        err = nfp_net_pf_app_start(pf);
 727        if (err)
 728                goto err_free_irqs;
 729
 730        err = nfp_net_pf_init_vnics(pf);
 731        if (err)
 732                goto err_stop_app;
 733
 734        mutex_unlock(&pf->lock);
 735
 736        return 0;
 737
 738err_stop_app:
 739        nfp_net_pf_app_stop(pf);
 740err_free_irqs:
 741        nfp_net_pf_free_irqs(pf);
 742err_free_vnics:
 743        nfp_net_pf_free_vnics(pf);
 744err_clean_ddir:
 745        nfp_net_debugfs_dir_clean(&pf->ddir);
 746        mutex_unlock(&pf->lock);
 747        nfp_shared_buf_unregister(pf);
 748err_devlink_unreg:
 749        cancel_work_sync(&pf->port_refresh_work);
 750        devlink_unregister(devlink);
 751err_app_clean:
 752        nfp_net_pf_app_clean(pf);
 753err_unmap:
 754        nfp_net_pci_unmap_mem(pf);
 755        return err;
 756}
 757
/* PF teardown: mirror of nfp_net_pci_probe().
 * Data vNICs are removed first under pf->lock (which also lets
 * nfp_net_refresh_port_table_sync() detect the removal race via an
 * empty vnics list), then the app, devlink and mappings.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn, *next;

	mutex_lock(&pf->lock);
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	mutex_unlock(&pf->lock);

	nfp_shared_buf_unregister(pf);
	devlink_unregister(priv_to_devlink(pf));

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);

	/* Flush any refresh work racing with removal */
	cancel_work_sync(&pf->port_refresh_work);
}
 785