linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/pci-tph.h>
#include <linux/bnxt/hsi.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)
#define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
                                 NETIF_MSG_TX_ERR)

MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD

#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
        [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
        [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
        [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
        { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
        { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
        { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
        ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
        ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
        ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
        ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
        ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
        ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
        ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
};

const u16 bnxt_bstore_to_trace[] = {
        [BNXT_CTX_SRT]          = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
        [BNXT_CTX_SRT2]         = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
        [BNXT_CTX_CRT]          = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
        [BNXT_CTX_CRT2]         = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
        [BNXT_CTX_RIGP0]        = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
        [BNXT_CTX_L2HWRM]       = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
        [BNXT_CTX_REHWRM]       = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
        [BNXT_CTX_CA0]          = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
        [BNXT_CTX_CA1]          = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
        [BNXT_CTX_CA2]          = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
        [BNXT_CTX_RIGP1]        = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
};

static struct workqueue_struct *bnxt_pf_wq;

#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
                               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}

const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
        .ports = {
                .src = 0,
                .dst = 0,
        },
        .addrs = {
                .v6addrs = {
                        .src = BNXT_IPV6_MASK_NONE,
                        .dst = BNXT_IPV6_MASK_NONE,
                },
        },
};

const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
        .ports = {
                .src = cpu_to_be16(0xffff),
                .dst = cpu_to_be16(0xffff),
        },
        .addrs = {
                .v6addrs = {
                        .src = BNXT_IPV6_MASK_ALL,
                        .dst = BNXT_IPV6_MASK_ALL,
                },
        },
};

const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
        .ports = {
                .src = cpu_to_be16(0xffff),
                .dst = cpu_to_be16(0xffff),
        },
        .addrs = {
                .v4addrs = {
                        .src = cpu_to_be32(0xffffffff),
                        .dst = cpu_to_be32(0xffffffff),
                },
        },
};

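/* Returns true if the board index identifies a virtual function (VF)
 * device; the probe path uses this to select VF-specific setup.
 */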
static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
                idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
                idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

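/* Doorbell write helpers.  Legacy chips use 32-bit completion ring
 * doorbells; P5 and later chips use 64-bit doorbells keyed by db_key64.
 * The _ARM variants also re-arm the ring's interrupt.
 */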
#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
                    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)                                          \
        bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |             \
                    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |              \
                    DB_RING_IDX(db, idx), (db)->doorbell)

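/* Ring the notification queue (NQ) doorbell appropriate for the chip
 * generation.  Chips before P5 have no NQ, so the CQ doorbell is used
 * instead.
 */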
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P7)
                BNXT_DB_NQ_P7(db, idx);
        else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
                            DB_RING_IDX(db, idx), db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
                return;

        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
        set_bit(event, &bp->sp_event);
        __bnxt_queue_sp_work(bp);
}

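/* Schedule recovery for an RX ring that reported an error.  P5+ chips
 * cannot reset an individual ring, so a full reset is requested there.
 */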
static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                else
                        set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
                __bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                          u16 curr)
{
        struct bnxt_napi *bnapi = txr->bnapi;

        if (bnapi->tx_fault)
                return;

        netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
                   txr->txq_index, txr->tx_hw_cons,
                   txr->tx_cons, txr->tx_prod, curr);
        WARN_ON_ONCE(1);
        bnapi->tx_fault = 1;
        bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

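/* TX length hint lookup table, indexed by the packet length in 512-byte
 * units.  The hint gives the hardware a rough packet size up front; e.g.
 * a 1500-byte frame (1500 >> 9 == 2) maps to TX_BD_FLAGS_LHINT_1024_TO_2047.
 */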
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                             u16 prod)
{
        /* Sync BD data before updating doorbell */
        wmb();
        bnxt_db_write(bp, &txr->tx_db, prod);
        txr->kick_pending = 0;
}

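/* Main transmit routine.  A small packet on an otherwise empty ring may
 * be pushed inline through the doorbell BAR (the tx_push path below);
 * all other packets are posted as a chain of long TX BDs and kicked via
 * the TX doorbell.
 */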
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd, *txbd0;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        struct pci_dev *pdev = bp->pdev;
        u16 prod, last_frag, txts_prod;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;
        skb_frag_t *frag;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                dev_core_stats_tx_dropped_inc(dev);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
        if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
                netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d.  SKB will be linearized.\n",
                                 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
                if (skb_linearize(skb)) {
                        dev_kfree_skb_any(skb);
                        dev_core_stats_tx_dropped_inc(dev);
                        return NETDEV_TX_OK;
                }
        }
#endif
        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                /* We must have raced with NAPI cleanup */
                if (net_ratelimit() && txr->kick_pending)
                        netif_warn(bp, tx_err, dev,
                                   "bnxt: ring busy w/ flush pending!\n");
                if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
                                        bp->tx_wake_thresh))
                        return NETDEV_TX_BUSY;
        }

        if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
                goto tx_free;

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

        tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
            ptp->tx_tstamp_en) {
                if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
                        lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                        tx_buf->is_ts_pkt = 1;
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                } else if (!skb_is_gso(skb)) {
                        u16 seq_id, hdr_off;

                        if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
                            !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
                                if (vlan_tag_flags)
                                        hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                tx_buf->is_ts_pkt = 1;
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

                                ptp->txts_req[txts_prod].tx_seqid = seq_id;
                                ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
                                tx_buf->txts_prod = txts_prod;
                        }
                }
        }
        if (unlikely(skb->no_fcs))
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

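        /* Push mode: if the ring is empty and the packet fits within the
         * push threshold, write the BDs and packet data directly into the
         * doorbell BAR, saving the hardware a DMA read.  Skipped whenever
         * lflags (timestamp/no-CRC) handling is required.
         */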
        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
            skb_frags_readable(skb) && !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        TX_BD_CNT(2));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        void *fptr;

                        frag = &skb_shinfo(skb)->frags[j];
                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
                prod = NEXT_TX(prod);
                tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
                                    DB_RING_IDX(&txr->tx_db, prod));
                WRITE_ONCE(txr->tx_prod, prod);

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad))
                        /* SKB already freed. */
                        goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                goto tx_free;

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                TX_BD_CNT(last_frag + 2);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);
        txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = lflags;
        if (skb_is_gso(skb)) {
                bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
                u32 hdr_len;

                if (skb->encapsulation) {
                        if (udp_gso)
                                hdr_len = skb_inner_transport_offset(skb) +
                                          sizeof(struct udphdr);
                        else
                                hdr_len = skb_inner_tcp_all_headers(skb);
                } else if (udp_gso) {
                        hdr_len = skb_transport_offset(skb) +
                                  sizeof(struct udphdr);
                } else {
                        hdr_len = skb_tcp_all_headers(skb);
                }

                txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags |=
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        txbd0 = txbd;
        for (i = 0; i < last_frag; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
                netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
                                          mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        skb_tx_timestamp(skb);

        prod = NEXT_TX(prod);
        WRITE_ONCE(txr->tx_prod, prod);

        if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
                bnxt_txr_db_kick(bp, txr, prod);
        } else {
                if (free_size >= bp->tx_wake_thresh)
                        txbd0->tx_bd_len_flags_type |=
                                cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
                txr->kick_pending = 1;
        }

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push) {
                        txbd0->tx_bd_len_flags_type &=
                                cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
                        bnxt_txr_db_kick(bp, txr, prod);
                }

                netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
                                   bp->tx_wake_thresh);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), DMA_TO_DEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
                frag = &skb_shinfo(skb)->frags[i];
                netmem_dma_unmap_page_attrs(&pdev->dev,
                                            dma_unmap_addr(tx_buf, mapping),
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE, 0);
        }

tx_free:
        dev_kfree_skb_any(skb);
tx_kick_pending:
        if (BNXT_TX_PTP_IS_SET(lflags)) {
                txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
                atomic64_inc(&bp->ptp_cfg->stats.ts_err);
                if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
                        /* set SKB to err so PTP worker will clean up */
                        ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
        }
        if (txr->kick_pending)
                bnxt_txr_db_kick(bp, txr, txr->tx_prod);
        txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
        dev_core_stats_tx_dropped_inc(dev);
        return NETDEV_TX_OK;
}

/* Returns true if some remaining TX packets not processed. */
static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                          int budget)
{
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        struct pci_dev *pdev = bp->pdev;
        u16 hw_cons = txr->tx_hw_cons;
        unsigned int tx_bytes = 0;
        u16 cons = txr->tx_cons;
        skb_frag_t *frag;
        int tx_pkts = 0;
        bool rc = false;

        while (RING_TX(bp, cons) != hw_cons) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                bool is_ts_pkt;
                int j, last;

                tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
                skb = tx_buf->skb;

                if (unlikely(!skb)) {
                        bnxt_sched_reset_txr(bp, txr, cons);
                        return rc;
                }

                is_ts_pkt = tx_buf->is_ts_pkt;
                if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
                        rc = true;
                        break;
                }

                cons = NEXT_TX(cons);
                tx_pkts++;
                tx_bytes += skb->len;
                tx_buf->skb = NULL;
                tx_buf->is_ts_pkt = 0;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), DMA_TO_DEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        frag = &skb_shinfo(skb)->frags[j];
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
                        netmem_dma_unmap_page_attrs(&pdev->dev,
                                                    dma_unmap_addr(tx_buf,
                                                                   mapping),
                                                    skb_frag_size(frag),
                                                    DMA_TO_DEVICE, 0);
                }
                if (unlikely(is_ts_pkt)) {
                        if (BNXT_CHIP_P5(bp)) {
                                /* PTP worker takes ownership of the skb */
                                bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
                                skb = NULL;
                        }
                }

next_tx_int:
                cons = NEXT_TX(cons);

                dev_consume_skb_any(skb);
        }

        WRITE_ONCE(txr->tx_cons, cons);

        __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
                                   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
                                   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);

        return rc;
}

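/* Service TX completions on every TX ring of this NAPI instance and clear
 * the TX completion event once all rings have caught up with the hardware.
 */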
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
        struct bnxt_tx_ring_info *txr;
        bool more = false;
        int i;

        bnxt_for_each_napi_tx(i, bnapi, txr) {
                if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
                        more |= __bnxt_tx_int(bp, txr, budget);
        }
        if (!more)
                bnapi->events &= ~BNXT_TX_CMP_EVENT;
}

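/* Header buffers come from a page pool separate from the one backing the
 * data pages when PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE, or when the
 * ring has explicitly requested a separate head pool.
 */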
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
        return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         unsigned int *offset,
                                         gfp_t gfp)
{
        struct page *page;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
                                                BNXT_RX_PAGE_SIZE);
        } else {
                page = page_pool_dev_alloc_pages(rxr->page_pool);
                *offset = 0;
        }
        if (!page)
                return NULL;

        *mapping = page_pool_get_dma_addr(page) + *offset;
        return page;
}

static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         unsigned int *offset,
                                         gfp_t gfp)
{
        netmem_ref netmem;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
                                                     BNXT_RX_PAGE_SIZE, gfp);
        } else {
                netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
                *offset = 0;
        }
        if (!netmem)
                return 0;

        *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
        return netmem;
}

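/* Allocate a small RX header buffer from the head page pool and return
 * its virtual address; *mapping is set to the DMA address of the data.
 */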
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
                                       struct bnxt_rx_ring_info *rxr,
                                       gfp_t gfp)
{
        unsigned int offset;
        struct page *page;

        page = page_pool_alloc_frag(rxr->head_pool, &offset,
                                    bp->rx_buf_size, gfp);
        if (!page)
                return NULL;

        *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
        return page_address(page) + offset;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                unsigned int offset;
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

                if (!page)
                        return -ENOMEM;

                mapping += bp->rx_dma_offset;
                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

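/* Recycle an RX buffer in place: repost the buffer at the consumer index
 * back at the current producer index, reusing its existing DMA mapping.
 * Called when a replacement buffer cannot be allocated.
 */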
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct bnxt *bp = rxr->bnapi->bp;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

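/* Post one buffer to the RX aggregation ring.  Software producer slots
 * are tracked in rx_agg_bmap because aggregation completions can return
 * buffers out of order, leaving holes that must be skipped on reuse.
 */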
static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                                u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;
        dma_addr_t mapping;
        netmem_ref netmem;

        netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
        if (!netmem)
                return -ENOMEM;

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));

        rx_agg_buf->netmem = netmem;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_agg_cmp *agg;
                struct rx_bd *prod_bd;
                netmem_ref netmem;
                u16 cons;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->netmem to 0 first.
                 */
                netmem = cons_rx_buf->netmem;
                cons_rx_buf->netmem = 0;
                prod_rx_buf->netmem = netmem;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

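/* Build an skb around a page-mode RX buffer after first reposting a
 * fresh buffer at the producer index; on allocation failure the original
 * buffer is reused so the ring never loses an entry.
 */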
static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 cons, void *data, u8 *data_ptr,
                                              dma_addr_t dma_addr,
                                              unsigned int offset_and_len)
{
        unsigned int len = offset_and_len & 0xffff;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
                                bp->rx_dir);
        skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        skb_mark_for_recycle(skb);
        skb_reserve(skb, bp->rx_offset);
        __skb_put(skb, len);

        return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
                                bp->rx_dir);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }

        skb_mark_for_recycle(skb);
        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

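/* Build an skb directly on top of a normal (non page-mode) RX buffer.
 * As above, a replacement buffer is allocated first; if that fails, the
 * current buffer is recycled and the packet is dropped.
 */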
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = napi_build_skb(data, bp->rx_buf_size);
        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                                bp->rx_dir);
        if (!skb) {
                page_pool_free_va(rxr->head_pool, data, true);
                return NULL;
        }

        skb_mark_for_recycle(skb);
        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

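/* Attach the aggregation buffers of a jumbo or TPA completion to an skb
 * or an xdp_buff as fragments.  Returns the total fragment length, or 0
 * if replenishing the aggregation ring failed and the buffers were
 * recycled instead.
 */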
static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
                                 struct bnxt_cp_ring_info *cpr,
                                 u16 idx, u32 agg_bufs, bool tpa,
                                 struct sk_buff *skb,
                                 struct xdp_buff *xdp)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct skb_shared_info *shinfo;
        struct bnxt_rx_ring_info *rxr;
        u32 i, total_frag_len = 0;
        bool p5_tpa = false;
        u16 prod;

        rxr = bnapi->rx_ring;
        prod = rxr->rx_agg_prod;

        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
                p5_tpa = true;

        if (skb)
                shinfo = skb_shinfo(skb);
        else
                shinfo = xdp_get_shared_info_from_buff(xdp);

        for (i = 0; i < agg_bufs; i++) {
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct rx_agg_cmp *agg;
                u16 cons, frag_len;
                netmem_ref netmem;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                if (skb) {
                        skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
                                               cons_rx_buf->offset,
                                               frag_len, BNXT_RX_PAGE_SIZE);
                } else {
                        skb_frag_t *frag = &shinfo->frags[i];

                        skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
                                                  cons_rx_buf->offset,
                                                  frag_len);
                        shinfo->nr_frags = i + 1;
                }
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_netmem() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                netmem = cons_rx_buf->netmem;
                cons_rx_buf->netmem = 0;

                if (xdp && netmem_is_pfmemalloc(netmem))
                        xdp_buff_set_frag_pfmemalloc(xdp);

                if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        if (skb) {
                                skb->len -= frag_len;
                                skb->data_len -= frag_len;
                                skb->truesize -= BNXT_RX_PAGE_SIZE;
                        }

                        --shinfo->nr_frags;
                        cons_rx_buf->netmem = netmem;

                        /* Update prod since possibly some netmems have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return 0;
                }

                page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
                                                  BNXT_RX_PAGE_SIZE);

                total_frag_len += frag_len;
                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return total_frag_len;
}

1335static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1336                                               struct bnxt_cp_ring_info *cpr,
1337                                               struct sk_buff *skb, u16 idx,
1338                                               u32 agg_bufs, bool tpa)
1339{
1340        u32 total_frag_len = 0;
1341
1342        total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1343                                               skb, NULL);
1344        if (!total_frag_len) {
1345                skb_mark_for_recycle(skb);
1346                dev_kfree_skb(skb);
1347                return NULL;
1348        }
1349
1350        return skb;
1351}
1352
1353static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1354                                   struct bnxt_cp_ring_info *cpr,
1355                                   struct xdp_buff *xdp, u16 idx,
1356                                   u32 agg_bufs, bool tpa)
1357{
1358        struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1359        u32 total_frag_len = 0;
1360
1361        if (!xdp_buff_has_frags(xdp))
1362                shinfo->nr_frags = 0;
1363
1364        total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1365                                               NULL, xdp);
1366        if (total_frag_len) {
1367                xdp_buff_set_frags_flag(xdp);
1368                shinfo->nr_frags = agg_bufs;
1369                shinfo->xdp_frags_size = total_frag_len;
1370        }
1371        return total_frag_len;
1372}
1373
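    /* Check that all agg_bufs completion entries for this packet have
     * arrived.  Only the last entry needs testing: its valid bit toggles
     * on each pass over the ring, and hardware writes completions in
     * order, so a valid last entry implies the earlier ones are present.
     */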
1374static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1375                               u8 agg_bufs, u32 *raw_cons)
1376{
1377        u16 last;
1378        struct rx_agg_cmp *agg;
1379
1380        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1381        last = RING_CMP(*raw_cons);
1382        agg = (struct rx_agg_cmp *)
1383                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1384        return RX_AGG_CMP_VALID(agg, *raw_cons);
1385}
1386
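    /* Copybreak path: copy a small packet into a freshly allocated skb so
     * the original ring buffer can be reposted as-is.  The buffer remains
     * mapped for the device, hence the sync-for-cpu/sync-for-device pair
     * around the copy.  The NET_IP_ALIGN padding bytes in front of the
     * data are copied as well to preserve IP header alignment.
     */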
1387static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1388                                      unsigned int len,
1389                                      dma_addr_t mapping)
1390{
1391        struct bnxt *bp = bnapi->bp;
1392        struct pci_dev *pdev = bp->pdev;
1393        struct sk_buff *skb;
1394
1395        skb = napi_alloc_skb(&bnapi->napi, len);
1396        if (!skb)
1397                return NULL;
1398
1399        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1400                                bp->rx_dir);
1401
1402        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1403               len + NET_IP_ALIGN);
1404
1405        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1406                                   bp->rx_dir);
1407
1408        skb_put(skb, len);
1409
1410        return skb;
1411}
1412
1413static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1414                                     unsigned int len,
1415                                     dma_addr_t mapping)
1416{
1417        return bnxt_copy_data(bnapi, data, len, mapping);
1418}
1419
1420static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1421                                     struct xdp_buff *xdp,
1422                                     unsigned int len,
1423                                     dma_addr_t mapping)
1424{
1425        unsigned int metasize;
1426        u8 *data;
1427        struct sk_buff *skb;
1428
1429        len = xdp->data_end - xdp->data_meta;
1430        metasize = xdp->data - xdp->data_meta;
1431        data = xdp->data_meta;
1432
1433        skb = bnxt_copy_data(bnapi, data, len, mapping);
1434        if (!skb)
1435                return skb;
1436
1437        if (metasize) {
1438                skb_metadata_set(skb, metasize);
1439                __skb_pull(skb, metasize);
1440        }
1441
1442        return skb;
1443}
1444
1445static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1446                           u32 *raw_cons, void *cmp)
1447{
1448        struct rx_cmp *rxcmp = cmp;
1449        u32 tmp_raw_cons = *raw_cons;
1450        u8 cmp_type, agg_bufs = 0;
1451
1452        cmp_type = RX_CMP_TYPE(rxcmp);
1453
1454        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1455                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1456                            RX_CMP_AGG_BUFS) >>
1457                           RX_CMP_AGG_BUFS_SHIFT;
1458        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1459                struct rx_tpa_end_cmp *tpa_end = cmp;
1460
1461                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1462                        return 0;
1463
1464                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1465        }
1466
1467        if (agg_bufs) {
1468                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1469                        return -EBUSY;
1470        }
1471        *raw_cons = tmp_raw_cons;
1472        return 0;
1473}
1474
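    /* On P5+ chips the hardware TPA agg_id space can exceed the number of
     * software TPA slots, so agg_ids are remapped: a free bit is claimed
     * in agg_idx_bmap and the mapping is recorded in agg_id_tbl so the
     * TPA end completion can find the same slot again.
     */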
1475static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1476{
1477        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1478        u16 idx = agg_id & MAX_TPA_P5_MASK;
1479
1480        if (test_bit(idx, map->agg_idx_bmap))
1481                idx = find_first_zero_bit(map->agg_idx_bmap,
1482                                          BNXT_AGG_IDX_BMAP_SIZE);
1483        __set_bit(idx, map->agg_idx_bmap);
1484        map->agg_id_tbl[agg_id] = idx;
1485        return idx;
1486}
1487
1488static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1489{
1490        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1491
1492        __clear_bit(idx, map->agg_idx_bmap);
1493}
1494
1495static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1496{
1497        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1498
1499        return map->agg_id_tbl[agg_id];
1500}
1501
1502static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1503                              struct rx_tpa_start_cmp *tpa_start,
1504                              struct rx_tpa_start_cmp_ext *tpa_start1)
1505{
1506        tpa_info->cfa_code_valid = 1;
1507        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1508        tpa_info->vlan_valid = 0;
1509        if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1510                tpa_info->vlan_valid = 1;
1511                tpa_info->metadata =
1512                        le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1513        }
1514}
1515
1516static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1517                                 struct rx_tpa_start_cmp *tpa_start,
1518                                 struct rx_tpa_start_cmp_ext *tpa_start1)
1519{
1520        tpa_info->vlan_valid = 0;
1521        if (TPA_START_VLAN_VALID(tpa_start)) {
1522                u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1523                u32 vlan_proto = ETH_P_8021Q;
1524
1525                tpa_info->vlan_valid = 1;
1526                if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1527                        vlan_proto = ETH_P_8021AD;
1528                tpa_info->metadata = vlan_proto << 16 |
1529                                     TPA_START_METADATA0_TCI(tpa_start1);
1530        }
1531}
1532
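    /* Start of a TPA (hardware GRO/LRO) aggregation.  The buffer in the
     * consumer slot is handed over to tpa_info for the duration of the
     * aggregation, and the buffer previously parked in tpa_info is
     * recycled into the producer slot, so the ring never loses a buffer.
     * Hash, VLAN and header-offset metadata from the TPA_START completion
     * is stashed in tpa_info for use at TPA_END time.
     */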
1533static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1534                           u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1535                           struct rx_tpa_start_cmp_ext *tpa_start1)
1536{
1537        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1538        struct bnxt_tpa_info *tpa_info;
1539        u16 cons, prod, agg_id;
1540        struct rx_bd *prod_bd;
1541        dma_addr_t mapping;
1542
1543        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1544                agg_id = TPA_START_AGG_ID_P5(tpa_start);
1545                agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1546        } else {
1547                agg_id = TPA_START_AGG_ID(tpa_start);
1548        }
1549        cons = tpa_start->rx_tpa_start_cmp_opaque;
1550        prod = rxr->rx_prod;
1551        cons_rx_buf = &rxr->rx_buf_ring[cons];
1552        prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1553        tpa_info = &rxr->rx_tpa[agg_id];
1554
1555        if (unlikely(cons != rxr->rx_next_cons ||
1556                     TPA_START_ERROR(tpa_start))) {
1557                netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1558                            cons, rxr->rx_next_cons,
1559                            TPA_START_ERROR_CODE(tpa_start1));
1560                bnxt_sched_reset_rxr(bp, rxr);
1561                return;
1562        }
1563        prod_rx_buf->data = tpa_info->data;
1564        prod_rx_buf->data_ptr = tpa_info->data_ptr;
1565
1566        mapping = tpa_info->mapping;
1567        prod_rx_buf->mapping = mapping;
1568
1569        prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1570
1571        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1572
1573        tpa_info->data = cons_rx_buf->data;
1574        tpa_info->data_ptr = cons_rx_buf->data_ptr;
1575        cons_rx_buf->data = NULL;
1576        tpa_info->mapping = cons_rx_buf->mapping;
1577
1578        tpa_info->len =
1579                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1580                                RX_TPA_START_CMP_LEN_SHIFT;
1581        if (likely(TPA_START_HASH_VALID(tpa_start))) {
1582                tpa_info->hash_type = PKT_HASH_TYPE_L4;
1583                tpa_info->gso_type = SKB_GSO_TCPV4;
1584                if (TPA_START_IS_IPV6(tpa_start1))
1585                        tpa_info->gso_type = SKB_GSO_TCPV6;
1586                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1587                else if (!BNXT_CHIP_P4_PLUS(bp) &&
1588                         TPA_START_HASH_TYPE(tpa_start) == 3)
1589                        tpa_info->gso_type = SKB_GSO_TCPV6;
1590                tpa_info->rss_hash =
1591                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1592        } else {
1593                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1594                tpa_info->gso_type = 0;
1595                netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1596        }
1597        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1598        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1599        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1600                bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1601        else
1602                bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1603        tpa_info->agg_count = 0;
1604
1605        rxr->rx_prod = NEXT_RX(prod);
1606        cons = RING_RX(bp, NEXT_RX(cons));
1607        rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1608        cons_rx_buf = &rxr->rx_buf_ring[cons];
1609
1610        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1611        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1612        cons_rx_buf->data = NULL;
1613}
1614
1615static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1616{
1617        if (agg_bufs)
1618                bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1619}
1620
1621#ifdef CONFIG_INET
1622static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1623{
1624        struct udphdr *uh = NULL;
1625
1626        if (ip_proto == htons(ETH_P_IP)) {
1627                struct iphdr *iph = (struct iphdr *)skb->data;
1628
1629                if (iph->protocol == IPPROTO_UDP)
1630                        uh = (struct udphdr *)(iph + 1);
1631        } else {
1632                struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1633
1634                if (iph->nexthdr == IPPROTO_UDP)
1635                        uh = (struct udphdr *)(iph + 1);
1636        }
1637        if (uh) {
1638                if (uh->check)
1639                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1640                else
1641                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1642        }
1643}
1644#endif
1645
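    /* Chip-specific GRO fixups for 5731x: rebuild the network and
     * transport header offsets from the TPA header info and re-seed the
     * TCP pseudo-header checksum in the form the GRO completion path
     * expects.  Internal loopback packets carry an extra 4 bytes up
     * front, so all offsets are shifted back by 4 in that case.
     */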
1646static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1647                                           int payload_off, int tcp_ts,
1648                                           struct sk_buff *skb)
1649{
1650#ifdef CONFIG_INET
1651        struct tcphdr *th;
1652        int len, nw_off;
1653        u16 outer_ip_off, inner_ip_off, inner_mac_off;
1654        u32 hdr_info = tpa_info->hdr_info;
1655        bool loopback = false;
1656
1657        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1658        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1659        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1660
1661        /* If the packet is an internal loopback packet, the offsets will
1662         * have an extra 4 bytes.
1663         */
1664        if (inner_mac_off == 4) {
1665                loopback = true;
1666        } else if (inner_mac_off > 4) {
1667                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1668                                            ETH_HLEN - 2));
1669
1670                /* We only support inner IPv4/IPv6.  If we don't see the
1671                 * correct protocol ID, it must be a loopback packet where
1672                 * the offsets are off by 4.
1673                 */
1674                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1675                        loopback = true;
1676        }
1677        if (loopback) {
1678                /* internal loopback packet, subtract all offsets by 4 */
1679                inner_ip_off -= 4;
1680                inner_mac_off -= 4;
1681                outer_ip_off -= 4;
1682        }
1683
1684        nw_off = inner_ip_off - ETH_HLEN;
1685        skb_set_network_header(skb, nw_off);
1686        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1687                struct ipv6hdr *iph = ipv6_hdr(skb);
1688
1689                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1690                len = skb->len - skb_transport_offset(skb);
1691                th = tcp_hdr(skb);
1692                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1693        } else {
1694                struct iphdr *iph = ip_hdr(skb);
1695
1696                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1697                len = skb->len - skb_transport_offset(skb);
1698                th = tcp_hdr(skb);
1699                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1700        }
1701
1702        if (inner_mac_off) { /* tunnel */
1703                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1704                                            ETH_HLEN - 2));
1705
1706                bnxt_gro_tunnel(skb, proto);
1707        }
1708#endif
1709        return skb;
1710}
1711
1712static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1713                                           int payload_off, int tcp_ts,
1714                                           struct sk_buff *skb)
1715{
1716#ifdef CONFIG_INET
1717        u16 outer_ip_off, inner_ip_off, inner_mac_off;
1718        u32 hdr_info = tpa_info->hdr_info;
1719        int iphdr_len, nw_off;
1720
1721        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1722        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1723        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1724
1725        nw_off = inner_ip_off - ETH_HLEN;
1726        skb_set_network_header(skb, nw_off);
1727        iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1728                     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1729        skb_set_transport_header(skb, nw_off + iphdr_len);
1730
1731        if (inner_mac_off) { /* tunnel */
1732                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1733                                            ETH_HLEN - 2));
1734
1735                bnxt_gro_tunnel(skb, proto);
1736        }
1737#endif
1738        return skb;
1739}
1740
1741#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1742#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1743
1744static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1745                                           int payload_off, int tcp_ts,
1746                                           struct sk_buff *skb)
1747{
1748#ifdef CONFIG_INET
1749        struct tcphdr *th;
1750        int len, nw_off, tcp_opt_len = 0;
1751
1752        if (tcp_ts)
1753                tcp_opt_len = 12;
1754
1755        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1756                struct iphdr *iph;
1757
1758                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1759                         ETH_HLEN;
1760                skb_set_network_header(skb, nw_off);
1761                iph = ip_hdr(skb);
1762                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1763                len = skb->len - skb_transport_offset(skb);
1764                th = tcp_hdr(skb);
1765                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1766        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1767                struct ipv6hdr *iph;
1768
1769                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1770                         ETH_HLEN;
1771                skb_set_network_header(skb, nw_off);
1772                iph = ipv6_hdr(skb);
1773                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1774                len = skb->len - skb_transport_offset(skb);
1775                th = tcp_hdr(skb);
1776                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1777        } else {
1778                dev_kfree_skb_any(skb);
1779                return NULL;
1780        }
1781
1782        if (nw_off) /* tunnel */
1783                bnxt_gro_tunnel(skb, skb->protocol);
1784#endif
1785        return skb;
1786}
1787
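    /* Turn a completed TPA aggregate into a GRO packet: record the
     * segment count and MSS, fix up the headers via the chip-specific
     * gro_func, then let tcp_gro_complete() finalize the skb.
     */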
1788static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1789                                           struct bnxt_tpa_info *tpa_info,
1790                                           struct rx_tpa_end_cmp *tpa_end,
1791                                           struct rx_tpa_end_cmp_ext *tpa_end1,
1792                                           struct sk_buff *skb)
1793{
1794#ifdef CONFIG_INET
1795        int payload_off;
1796        u16 segs;
1797
1798        segs = TPA_END_TPA_SEGS(tpa_end);
1799        if (segs == 1)
1800                return skb;
1801
1802        NAPI_GRO_CB(skb)->count = segs;
1803        skb_shinfo(skb)->gso_size =
1804                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1805        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1806        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1807                payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1808        else
1809                payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1810        skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1811        if (likely(skb))
1812                tcp_gro_complete(skb);
1813#endif
1814        return skb;
1815}
1816
1817/* Given the cfa_code of a received packet, determine which
1818 * netdev (vf-rep or PF) the packet is destined for.
1819 */
1820static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1821{
1822        struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1823
1824        /* if vf-rep dev is NULL, it must belong to the PF */
1825        return dev ? dev : bp->dev;
1826}
1827
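    /* End of a TPA aggregation.  Returns the completed skb, NULL if the
     * packet was aborted (the caller counts it as a drop), or an ERR_PTR
     * when not all completion entries have arrived yet (-EBUSY).  Small
     * packets take the copybreak path; otherwise the tpa_info buffer
     * becomes the skb head and a freshly allocated buffer replaces it.
     */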
1828static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1829                                           struct bnxt_cp_ring_info *cpr,
1830                                           u32 *raw_cons,
1831                                           struct rx_tpa_end_cmp *tpa_end,
1832                                           struct rx_tpa_end_cmp_ext *tpa_end1,
1833                                           u8 *event)
1834{
1835        struct bnxt_napi *bnapi = cpr->bnapi;
1836        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1837        struct net_device *dev = bp->dev;
1838        u8 *data_ptr, agg_bufs;
1839        unsigned int len;
1840        struct bnxt_tpa_info *tpa_info;
1841        dma_addr_t mapping;
1842        struct sk_buff *skb;
1843        u16 idx = 0, agg_id;
1844        void *data;
1845        bool gro;
1846
1847        if (unlikely(bnapi->in_reset)) {
1848                int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1849
1850                if (rc < 0)
1851                        return ERR_PTR(-EBUSY);
1852                return NULL;
1853        }
1854
1855        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1856                agg_id = TPA_END_AGG_ID_P5(tpa_end);
1857                agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1858                agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1859                tpa_info = &rxr->rx_tpa[agg_id];
1860                if (unlikely(agg_bufs != tpa_info->agg_count)) {
1861                        netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1862                                    agg_bufs, tpa_info->agg_count);
1863                        agg_bufs = tpa_info->agg_count;
1864                }
1865                tpa_info->agg_count = 0;
1866                *event |= BNXT_AGG_EVENT;
1867                bnxt_free_agg_idx(rxr, agg_id);
1868                idx = agg_id;
1869                gro = !!(bp->flags & BNXT_FLAG_GRO);
1870        } else {
1871                agg_id = TPA_END_AGG_ID(tpa_end);
1872                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1873                tpa_info = &rxr->rx_tpa[agg_id];
1874                idx = RING_CMP(*raw_cons);
1875                if (agg_bufs) {
1876                        if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1877                                return ERR_PTR(-EBUSY);
1878
1879                        *event |= BNXT_AGG_EVENT;
1880                        idx = NEXT_CMP(idx);
1881                }
1882                gro = !!TPA_END_GRO(tpa_end);
1883        }
1884        data = tpa_info->data;
1885        data_ptr = tpa_info->data_ptr;
1886        prefetch(data_ptr);
1887        len = tpa_info->len;
1888        mapping = tpa_info->mapping;
1889
1890        if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1891                bnxt_abort_tpa(cpr, idx, agg_bufs);
1892                if (agg_bufs > MAX_SKB_FRAGS)
1893                        netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1894                                    agg_bufs, (int)MAX_SKB_FRAGS);
1895                return NULL;
1896        }
1897
1898        if (len <= bp->rx_copybreak) {
1899                skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1900                if (!skb) {
1901                        bnxt_abort_tpa(cpr, idx, agg_bufs);
1902                        cpr->sw_stats->rx.rx_oom_discards += 1;
1903                        return NULL;
1904                }
1905        } else {
1906                u8 *new_data;
1907                dma_addr_t new_mapping;
1908
1909                new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1910                                                GFP_ATOMIC);
1911                if (!new_data) {
1912                        bnxt_abort_tpa(cpr, idx, agg_bufs);
1913                        cpr->sw_stats->rx.rx_oom_discards += 1;
1914                        return NULL;
1915                }
1916
1917                tpa_info->data = new_data;
1918                tpa_info->data_ptr = new_data + bp->rx_offset;
1919                tpa_info->mapping = new_mapping;
1920
1921                skb = napi_build_skb(data, bp->rx_buf_size);
1922                dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1923                                        bp->rx_buf_use_size, bp->rx_dir);
1924
1925                if (!skb) {
1926                        page_pool_free_va(rxr->head_pool, data, true);
1927                        bnxt_abort_tpa(cpr, idx, agg_bufs);
1928                        cpr->sw_stats->rx.rx_oom_discards += 1;
1929                        return NULL;
1930                }
1931                skb_mark_for_recycle(skb);
1932                skb_reserve(skb, bp->rx_offset);
1933                skb_put(skb, len);
1934        }
1935
1936        if (agg_bufs) {
1937                skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1938                                              true);
1939                if (!skb) {
1940                        /* Page reuse already handled by __bnxt_rx_agg_netmems(). */
1941                        cpr->sw_stats->rx.rx_oom_discards += 1;
1942                        return NULL;
1943                }
1944        }
1945
1946        if (tpa_info->cfa_code_valid)
1947                dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1948        skb->protocol = eth_type_trans(skb, dev);
1949
1950        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1951                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1952
1953        if (tpa_info->vlan_valid &&
1954            (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1955                __be16 vlan_proto = htons(tpa_info->metadata >>
1956                                          RX_CMP_FLAGS2_METADATA_TPID_SFT);
1957                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1958
1959                if (eth_type_vlan(vlan_proto)) {
1960                        __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1961                } else {
1962                        dev_kfree_skb(skb);
1963                        return NULL;
1964                }
1965        }
1966
1967        skb_checksum_none_assert(skb);
1968        if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1969                skb->ip_summed = CHECKSUM_UNNECESSARY;
1970                skb->csum_level =
1971                        (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1972        }
1973
1974        if (gro)
1975                skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1976
1977        return skb;
1978}
1979
1980static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1981                         struct rx_agg_cmp *rx_agg)
1982{
1983        u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1984        struct bnxt_tpa_info *tpa_info;
1985
1986        agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1987        tpa_info = &rxr->rx_tpa[agg_id];
1988        BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1989        tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1990}
1991
1992static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1993                             struct sk_buff *skb)
1994{
1995        skb_mark_for_recycle(skb);
1996
1997        if (skb->dev != bp->dev) {
1998                /* this packet belongs to a vf-rep */
1999                bnxt_vf_rep_rx(bp, skb);
2000                return;
2001        }
2002        skb_record_rx_queue(skb, bnapi->index);
2003        napi_gro_receive(&bnapi->napi, skb);
2004}
2005
2006static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2007                             struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2008{
2009        u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2010
2011        if (BNXT_PTP_RX_TS_VALID(flags))
2012                goto ts_valid;
2013        if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2014                return false;
2015
2016ts_valid:
2017        *cmpl_ts = ts;
2018        return true;
2019}
2020
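    /* Extract the VLAN tag from a completion record.  Older L2
     * completions carry TPID and TCI in the flags2 metadata; V3
     * completions use an explicit VLAN-valid bit plus a TPID select
     * field.  A packet with an unrecognized TPID is dropped.
     */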
2021static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2022                                    struct rx_cmp *rxcmp,
2023                                    struct rx_cmp_ext *rxcmp1)
2024{
2025        __be16 vlan_proto;
2026        u16 vtag;
2027
2028        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2029                __le32 flags2 = rxcmp1->rx_cmp_flags2;
2030                u32 meta_data;
2031
2032                if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2033                        return skb;
2034
2035                meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2036                vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2037                vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2038                if (eth_type_vlan(vlan_proto))
2039                        __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2040                else
2041                        goto vlan_err;
2042        } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2043                if (RX_CMP_VLAN_VALID(rxcmp)) {
2044                        u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2045
2046                        if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2047                                vlan_proto = htons(ETH_P_8021Q);
2048                        else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2049                                vlan_proto = htons(ETH_P_8021AD);
2050                        else
2051                                goto vlan_err;
2052                        vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2053                        __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2054                }
2055        }
2056        return skb;
2057vlan_err:
2058        skb_mark_for_recycle(skb);
2059        dev_kfree_skb(skb);
2060        return NULL;
2061}
2062
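    /* V3 completions report which RSS "extract op" produced the hash;
     * the 4-tuple ops map to an L4 hash type, everything else to L3.
     */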
2063static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2064                                           struct rx_cmp *rxcmp)
2065{
2066        u8 ext_op;
2067
2068        ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2069        switch (ext_op) {
2070        case EXT_OP_INNER_4:
2071        case EXT_OP_OUTER_4:
2072        case EXT_OP_INNFL_3:
2073        case EXT_OP_OUTFL_3:
2074                return PKT_HASH_TYPE_L4;
2075        default:
2076                return PKT_HASH_TYPE_L3;
2077        }
2078}
2079
2080/* returns the following:
2081 * 1       - 1 packet successfully received
2082 * 0       - successful TPA_START, packet not completed yet
2083 * -EBUSY  - completion ring does not have all the agg buffers yet
2084 * -ENOMEM - packet aborted due to out of memory
2085 * -EIO    - packet aborted due to hw error indicated in BD
2086 */
2087static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2088                       u32 *raw_cons, u8 *event)
2089{
2090        struct bnxt_napi *bnapi = cpr->bnapi;
2091        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2092        struct net_device *dev = bp->dev;
2093        struct rx_cmp *rxcmp;
2094        struct rx_cmp_ext *rxcmp1;
2095        u32 tmp_raw_cons = *raw_cons;
2096        u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2097        struct skb_shared_info *sinfo;
2098        struct bnxt_sw_rx_bd *rx_buf;
2099        unsigned int len;
2100        u8 *data_ptr, agg_bufs, cmp_type;
2101        bool xdp_active = false;
2102        dma_addr_t dma_addr;
2103        struct sk_buff *skb;
2104        struct xdp_buff xdp;
2105        u32 flags, misc;
2106        u32 cmpl_ts;
2107        void *data;
2108        int rc = 0;
2109
2110        rxcmp = (struct rx_cmp *)
2111                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2112
2113        cmp_type = RX_CMP_TYPE(rxcmp);
2114
2115        if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2116                bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2117                goto next_rx_no_prod_no_len;
2118        }
2119
2120        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2121        cp_cons = RING_CMP(tmp_raw_cons);
2122        rxcmp1 = (struct rx_cmp_ext *)
2123                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2124
2125        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2126                return -EBUSY;
2127
2128        /* The validity test of the entry must be done before
2129         * reading any further.
2130         */
2131        dma_rmb();
2132        prod = rxr->rx_prod;
2133
2134        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2135            cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2136                bnxt_tpa_start(bp, rxr, cmp_type,
2137                               (struct rx_tpa_start_cmp *)rxcmp,
2138                               (struct rx_tpa_start_cmp_ext *)rxcmp1);
2139
2140                *event |= BNXT_RX_EVENT;
2141                goto next_rx_no_prod_no_len;
2142
2143        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2144                skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2145                                   (struct rx_tpa_end_cmp *)rxcmp,
2146                                   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2147
2148                if (IS_ERR(skb))
2149                        return -EBUSY;
2150
2151                rc = -ENOMEM;
2152                if (likely(skb)) {
2153                        bnxt_deliver_skb(bp, bnapi, skb);
2154                        rc = 1;
2155                }
2156                *event |= BNXT_RX_EVENT;
2157                goto next_rx_no_prod_no_len;
2158        }
2159
2160        cons = rxcmp->rx_cmp_opaque;
2161        if (unlikely(cons != rxr->rx_next_cons)) {
2162                int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2163
2164                /* 0xffff is a forced error, don't print it */
2165                if (rxr->rx_next_cons != 0xffff)
2166                        netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2167                                    cons, rxr->rx_next_cons);
2168                bnxt_sched_reset_rxr(bp, rxr);
2169                if (rc1)
2170                        return rc1;
2171                goto next_rx_no_prod_no_len;
2172        }
2173        rx_buf = &rxr->rx_buf_ring[cons];
2174        data = rx_buf->data;
2175        data_ptr = rx_buf->data_ptr;
2176        prefetch(data_ptr);
2177
2178        misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2179        agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2180
2181        if (agg_bufs) {
2182                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2183                        return -EBUSY;
2184
2185                cp_cons = NEXT_CMP(cp_cons);
2186                *event |= BNXT_AGG_EVENT;
2187        }
2188        *event |= BNXT_RX_EVENT;
2189
2190        rx_buf->data = NULL;
2191        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2192                u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2193
2194                bnxt_reuse_rx_data(rxr, cons, data);
2195                if (agg_bufs)
2196                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2197                                               false);
2198
2199                rc = -EIO;
2200                if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2201                        bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2202                        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2203                            !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2204                                netdev_warn_once(bp->dev, "RX buffer error %x\n",
2205                                                 rx_err);
2206                                bnxt_sched_reset_rxr(bp, rxr);
2207                        }
2208                }
2209                goto next_rx_no_len;
2210        }
2211
2212        flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2213        len = flags >> RX_CMP_LEN_SHIFT;
2214        dma_addr = rx_buf->mapping;
2215
2216        if (bnxt_xdp_attached(bp, rxr)) {
2217                bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2218                if (agg_bufs) {
2219                        u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2220                                                               cp_cons,
2221                                                               agg_bufs,
2222                                                               false);
2223                        if (!frag_len)
2224                                goto oom_next_rx;
2225
2226                }
2227                xdp_active = true;
2228        }
2229
2230        if (xdp_active) {
2231                if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2232                        rc = 1;
2233                        goto next_rx;
2234                }
2235                if (xdp_buff_has_frags(&xdp)) {
2236                        sinfo = xdp_get_shared_info_from_buff(&xdp);
2237                        agg_bufs = sinfo->nr_frags;
2238                } else {
2239                        agg_bufs = 0;
2240                }
2241        }
2242
2243        if (len <= bp->rx_copybreak) {
2244                if (!xdp_active)
2245                        skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2246                else
2247                        skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2248                bnxt_reuse_rx_data(rxr, cons, data);
2249                if (!skb) {
2250                        if (agg_bufs) {
2251                                if (!xdp_active)
2252                                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2253                                                               agg_bufs, false);
2254                                else
2255                                        bnxt_xdp_buff_frags_free(rxr, &xdp);
2256                        }
2257                        goto oom_next_rx;
2258                }
2259        } else {
2260                u32 payload;
2261
2262                if (rx_buf->data_ptr == data_ptr)
2263                        payload = misc & RX_CMP_PAYLOAD_OFFSET;
2264                else
2265                        payload = 0;
2266                skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2267                                      payload | len);
2268                if (!skb)
2269                        goto oom_next_rx;
2270        }
2271
2272        if (agg_bufs) {
2273                if (!xdp_active) {
2274                        skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2275                                                      agg_bufs, false);
2276                        if (!skb)
2277                                goto oom_next_rx;
2278                } else {
2279                        skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2280                                                 rxr->page_pool, &xdp);
2281                        if (!skb) {
2282                                /* we should be able to free the old skb here */
2283                                bnxt_xdp_buff_frags_free(rxr, &xdp);
2284                                goto oom_next_rx;
2285                        }
2286                }
2287        }
2288
2289        if (RX_CMP_HASH_VALID(rxcmp)) {
2290                enum pkt_hash_types type;
2291
2292                if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2293                        type = bnxt_rss_ext_op(bp, rxcmp);
2294                } else {
2295                        u32 itypes = RX_CMP_ITYPES(rxcmp);
2296
2297                        if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2298                            itypes == RX_CMP_FLAGS_ITYPE_UDP)
2299                                type = PKT_HASH_TYPE_L4;
2300                        else
2301                                type = PKT_HASH_TYPE_L3;
2302                }
2303                skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2304        }
2305
2306        if (cmp_type == CMP_TYPE_RX_L2_CMP)
2307                dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2308        skb->protocol = eth_type_trans(skb, dev);
2309
2310        if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2311                skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2312                if (!skb)
2313                        goto next_rx;
2314        }
2315
2316        skb_checksum_none_assert(skb);
2317        if (RX_CMP_L4_CS_OK(rxcmp1)) {
2318                if (dev->features & NETIF_F_RXCSUM) {
2319                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2320                        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2321                }
2322        } else {
2323                if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2324                        if (dev->features & NETIF_F_RXCSUM)
2325                                bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2326                }
2327        }
2328
2329        if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2330                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2331                        u64 ns, ts;
2332
2333                        if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2334                                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2335
2336                                ns = bnxt_timecounter_cyc2time(ptp, ts);
2337                                memset(skb_hwtstamps(skb), 0,
2338                                       sizeof(*skb_hwtstamps(skb)));
2339                                skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2340                        }
2341                }
2342        }
2343        bnxt_deliver_skb(bp, bnapi, skb);
2344        rc = 1;
2345
2346next_rx:
2347        cpr->rx_packets += 1;
2348        cpr->rx_bytes += len;
2349
2350next_rx_no_len:
2351        rxr->rx_prod = NEXT_RX(prod);
2352        rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2353
2354next_rx_no_prod_no_len:
2355        *raw_cons = tmp_raw_cons;
2356
2357        return rc;
2358
2359oom_next_rx:
2360        cpr->sw_stats->rx.rx_oom_discards += 1;
2361        rc = -ENOMEM;
2362        goto next_rx;
2363}
2364
2365/* In netpoll mode, if we are using a combined completion ring, we need to
2366 * discard the rx packets and recycle the buffers.  Forcing an error bit in
2367 * the completion makes bnxt_rx_pkt() take the drop-and-recycle path. */
2368static int bnxt_force_rx_discard(struct bnxt *bp,
2369                                 struct bnxt_cp_ring_info *cpr,
2370                                 u32 *raw_cons, u8 *event)
2371{
2372        u32 tmp_raw_cons = *raw_cons;
2373        struct rx_cmp_ext *rxcmp1;
2374        struct rx_cmp *rxcmp;
2375        u16 cp_cons;
2376        u8 cmp_type;
2377        int rc;
2378
2379        cp_cons = RING_CMP(tmp_raw_cons);
2380        rxcmp = (struct rx_cmp *)
2381                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2382
2383        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2384        cp_cons = RING_CMP(tmp_raw_cons);
2385        rxcmp1 = (struct rx_cmp_ext *)
2386                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2387
2388        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2389                return -EBUSY;
2390
2391        /* The validity test of the entry must be done before
2392         * reading any further.
2393         */
2394        dma_rmb();
2395        cmp_type = RX_CMP_TYPE(rxcmp);
2396        if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2397            cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2398                rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2399                        cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2400        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2401                struct rx_tpa_end_cmp_ext *tpa_end1;
2402
2403                tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2404                tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2405                        cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2406        }
2407        rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2408        if (rc && rc != -EBUSY)
2409                cpr->sw_stats->rx.rx_netpoll_discards += 1;
2410        return rc;
2411}
2412
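    /* Read one firmware health register.  The register descriptor
     * encodes both the access type (config space, GRC, BAR0/BAR1) and
     * the offset; GRC registers have already been remapped into a BAR0
     * window, so they fall through to the BAR0 read.
     */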
2413u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2414{
2415        struct bnxt_fw_health *fw_health = bp->fw_health;
2416        u32 reg = fw_health->regs[reg_idx];
2417        u32 reg_type, reg_off, val = 0;
2418
2419        reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2420        reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2421        switch (reg_type) {
2422        case BNXT_FW_HEALTH_REG_TYPE_CFG:
2423                pci_read_config_dword(bp->pdev, reg_off, &val);
2424                break;
2425        case BNXT_FW_HEALTH_REG_TYPE_GRC:
2426                reg_off = fw_health->mapped_regs[reg_idx];
2427                fallthrough;
2428        case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2429                val = readl(bp->bar0 + reg_off);
2430                break;
2431        case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2432                val = readl(bp->bar1 + reg_off);
2433                break;
2434        }
2435        if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2436                val &= fw_health->fw_reset_inprog_reg_mask;
2437        return val;
2438}
2439
2440static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2441{
2442        int i;
2443
2444        for (i = 0; i < bp->rx_nr_rings; i++) {
2445                u16 grp_idx = bp->rx_ring[i].bnapi->index;
2446                struct bnxt_ring_grp_info *grp_info;
2447
2448                grp_info = &bp->grp_info[grp_idx];
2449                if (grp_info->agg_fw_ring_id == ring_id)
2450                        return grp_idx;
2451        }
2452        return INVALID_HW_RING_ID;
2453}
2454
2455static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2456{
2457        struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2458
2459        if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2460                return link_info->force_link_speed2;
2461        if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2462                return link_info->force_pam4_link_speed;
2463        return link_info->force_link_speed;
2464}
2465
2466static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2467{
2468        struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2469
2470        if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2471                link_info->req_link_speed = link_info->force_link_speed2;
2472                link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2473                switch (link_info->req_link_speed) {
2474                case BNXT_LINK_SPEED_50GB_PAM4:
2475                case BNXT_LINK_SPEED_100GB_PAM4:
2476                case BNXT_LINK_SPEED_200GB_PAM4:
2477                case BNXT_LINK_SPEED_400GB_PAM4:
2478                        link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2479                        break;
2480                case BNXT_LINK_SPEED_100GB_PAM4_112:
2481                case BNXT_LINK_SPEED_200GB_PAM4_112:
2482                case BNXT_LINK_SPEED_400GB_PAM4_112:
2483                        link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2484                        break;
2485                default:
2486                        link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2487                }
2488                return;
2489        }
2490        link_info->req_link_speed = link_info->force_link_speed;
2491        link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2492        if (link_info->force_pam4_link_speed) {
2493                link_info->req_link_speed = link_info->force_pam4_link_speed;
2494                link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2495        }
2496}
2497
2498static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2499{
2500        struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2501
2502        if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2503                link_info->advertising = link_info->auto_link_speeds2;
2504                return;
2505        }
2506        link_info->advertising = link_info->auto_link_speeds;
2507        link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2508}
2509
2510static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2511{
2512        struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2513
2514        if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2515                if (link_info->req_link_speed != link_info->force_link_speed2)
2516                        return true;
2517                return false;
2518        }
2519        if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2520            link_info->req_link_speed != link_info->force_link_speed)
2521                return true;
2522        if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2523            link_info->req_link_speed != link_info->force_pam4_link_speed)
2524                return true;
2525        return false;
2526}
2527
2528static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2529{
2530        struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2531
2532        if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2533                if (link_info->advertising != link_info->auto_link_speeds2)
2534                        return true;
2535                return false;
2536        }
2537        if (link_info->advertising != link_info->auto_link_speeds ||
2538            link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2539                return true;
2540        return false;
2541}
2542
2543bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2544{
2545        u32 flags = bp->ctx->ctx_arr[type].flags;
2546
2547        return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2548                ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2549                 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2550}
2551
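    /* Set up a firmware trace buffer descriptor.  A known magic byte is
     * written at the very last byte of the backing store, presumably so
     * that the coredump code can later tell whether firmware has wrapped
     * and overwritten the end of the buffer.  The last page may sit
     * behind an indirect page table when the buffer spans more than
     * MAX_CTX_PAGES pages.
     */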
2552static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2553{
2554        u32 mem_size, pages, rem_bytes, magic_byte_offset;
2555        u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2556        struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2557        struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2558        struct bnxt_bs_trace_info *bs_trace;
2559        int last_pg;
2560
2561        if (ctxm->instance_bmap > 1)
2562                return;
2563
2564        mem_size = ctxm->max_entries * ctxm->entry_size;
2565        rem_bytes = mem_size % BNXT_PAGE_SIZE;
2566        pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2567
2568        last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2569        magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2570
2571        rmem = &ctx_pg[0].ring_mem;
2572        bs_trace = &bp->bs_trace[trace_type];
2573        bs_trace->ctx_type = ctxm->type;
2574        bs_trace->trace_type = trace_type;
2575        if (pages > MAX_CTX_PAGES) {
2576                int last_pg_dir = rmem->nr_pages - 1;
2577
2578                rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2579                bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2580        } else {
2581                bs_trace->magic_byte = rmem->pg_arr[last_pg];
2582        }
2583        bs_trace->magic_byte += magic_byte_offset;
2584        *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2585}
2586
2587#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1)                             \
2588        (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2589         ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2590
2591#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2)                           \
2592        (((data2) &                                                     \
2593          ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2594         ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2595
2596#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)                          \
2597        ((data2) &                                                      \
2598          ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2599
2600#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)                        \
2601        (((data2) &                                                     \
2602          ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2603         ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2604
2605#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)                       \
2606        ((data1) &                                                      \
2607         ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2608
2609#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)             \
2610        (((data1) &                                                     \
2611          ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2612         ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2613
2614/* Return true if the workqueue has to be scheduled */
2615static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2616{
2617        u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2618
2619        switch (err_type) {
2620        case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2621                netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2622                           BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2623                break;
2624        case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2625                netdev_warn(bp->dev, "Pause Storm detected!\n");
2626                break;
2627        case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2628                netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2629                break;
2630        case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2631                u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2632                char *threshold_type;
2633                bool notify = false;
2634                char *dir_str;
2635
2636                switch (type) {
2637                case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2638                        threshold_type = "warning";
2639                        break;
2640                case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2641                        threshold_type = "critical";
2642                        break;
2643                case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2644                        threshold_type = "fatal";
2645                        break;
2646                case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2647                        threshold_type = "shutdown";
2648                        break;
2649                default:
2650                        netdev_err(bp->dev, "Unknown thermal threshold type event\n");
2651                        return false;
2652                }
2653                if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2654                        dir_str = "above";
2655                        notify = true;
2656                } else {
2657                        dir_str = "below";
2658                }
2659                netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2660                            dir_str, threshold_type);
2661                netdev_warn(bp->dev, "Temperature (in Celsius), current: %lu, threshold: %lu\n",
2662                            BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2663                            BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2664                if (notify) {
2665                        bp->thermal_threshold_type = type;
2666                        set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2667                        return true;
2668                }
2669                return false;
2670        }
2671        case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2672                netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2673                break;
2674        default:
2675                netdev_err(bp->dev, "FW reported unknown error type %u\n",
2676                           err_type);
2677                break;
2678        }
2679        return false;
2680}
2681
2682#define BNXT_GET_EVENT_PORT(data)       \
2683        ((data) &                       \
2684         ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2685
2686#define BNXT_EVENT_RING_TYPE(data2)     \
2687        ((data2) &                      \
2688         ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2689
2690#define BNXT_EVENT_RING_TYPE_RX(data2)  \
2691        (BNXT_EVENT_RING_TYPE(data2) == \
2692         ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2693
2694#define BNXT_EVENT_PHC_EVENT_TYPE(data1)        \
2695        (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2696         ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2697
2698#define BNXT_EVENT_PHC_RTC_UPDATE(data1)        \
2699        (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2700         ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2701
2702#define BNXT_PHC_BITS   48
2703
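/* Process one async event completion from the firmware.  Decode
 * event_id, data1 and data2, latch the relevant sp_event bit(s) and
 * kick the slow path workqueue for events that need process context.
 * All events are also forwarded to the ULPs before returning.
 */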
2704static int bnxt_async_event_process(struct bnxt *bp,
2705                                    struct hwrm_async_event_cmpl *cmpl)
2706{
2707        u16 event_id = le16_to_cpu(cmpl->event_id);
2708        u32 data1 = le32_to_cpu(cmpl->event_data1);
2709        u32 data2 = le32_to_cpu(cmpl->event_data2);
2710
2711        netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2712                   event_id, data1, data2);
2713
2714        /* TODO CHIMP_FW: Define event id's for link change, error etc */
2715        switch (event_id) {
2716        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2717                struct bnxt_link_info *link_info = &bp->link_info;
2718
2719                if (BNXT_VF(bp))
2720                        goto async_event_process_exit;
2721
2722                /* print unsupported speed warning in forced speed mode only */
2723                if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2724                    (data1 & 0x20000)) {
2725                        u16 fw_speed = bnxt_get_force_speed(link_info);
2726                        u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2727
2728                        if (speed != SPEED_UNKNOWN)
2729                                netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2730                                            speed);
2731                }
2732                set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2733        }
2734                fallthrough;
2735        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2736        case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2737                set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2738                fallthrough;
2739        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2740                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2741                break;
2742        case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2743                set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2744                break;
2745        case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2746                u16 port_id = BNXT_GET_EVENT_PORT(data1);
2747
2748                if (BNXT_VF(bp))
2749                        break;
2750
2751                if (bp->pf.port_id != port_id)
2752                        break;
2753
2754                set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2755                break;
2756        }
2757        case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2758                if (BNXT_PF(bp))
2759                        goto async_event_process_exit;
2760                set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2761                break;
2762        case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2763                char *type_str = "Solicited";
2764
2765                if (!bp->fw_health)
2766                        goto async_event_process_exit;
2767
2768                bp->fw_reset_timestamp = jiffies;
2769                bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2770                if (!bp->fw_reset_min_dsecs)
2771                        bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2772                bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2773                if (!bp->fw_reset_max_dsecs)
2774                        bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2775                if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2776                        set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2777                } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2778                        type_str = "Fatal";
2779                        bp->fw_health->fatalities++;
2780                        set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2781                } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2782                           EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2783                        type_str = "Non-fatal";
2784                        bp->fw_health->survivals++;
2785                        set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2786                }
2787                netif_warn(bp, hw, bp->dev,
2788                           "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2789                           type_str, data1, data2,
2790                           bp->fw_reset_min_dsecs * 100,
2791                           bp->fw_reset_max_dsecs * 100);
2792                set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2793                break;
2794        }
2795        case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2796                struct bnxt_fw_health *fw_health = bp->fw_health;
2797                char *status_desc = "healthy";
2798                u32 status;
2799
2800                if (!fw_health)
2801                        goto async_event_process_exit;
2802
2803                if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2804                        fw_health->enabled = false;
2805                        netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2806                        break;
2807                }
2808                fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2809                fw_health->tmr_multiplier =
2810                        DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2811                                     bp->current_interval * 10);
2812                fw_health->tmr_counter = fw_health->tmr_multiplier;
2813                if (!fw_health->enabled)
2814                        fw_health->last_fw_heartbeat =
2815                                bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2816                fw_health->last_fw_reset_cnt =
2817                        bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2818                status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2819                if (status != BNXT_FW_STATUS_HEALTHY)
2820                        status_desc = "unhealthy";
2821                netif_info(bp, drv, bp->dev,
2822                           "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2823                           fw_health->primary ? "primary" : "backup", status,
2824                           status_desc, fw_health->last_fw_reset_cnt);
2825                if (!fw_health->enabled) {
2826                        /* Make sure tmr_counter is set and visible to
2827                         * bnxt_health_check() before setting enabled to true.
2828                         */
2829                        smp_wmb();
2830                        fw_health->enabled = true;
2831                }
2832                goto async_event_process_exit;
2833        }
2834        case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2835                netif_notice(bp, hw, bp->dev,
2836                             "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2837                             data1, data2);
2838                goto async_event_process_exit;
2839        case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2840                struct bnxt_rx_ring_info *rxr;
2841                u16 grp_idx;
2842
2843                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2844                        goto async_event_process_exit;
2845
2846                netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2847                            BNXT_EVENT_RING_TYPE(data2), data1);
2848                if (!BNXT_EVENT_RING_TYPE_RX(data2))
2849                        goto async_event_process_exit;
2850
2851                grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2852                if (grp_idx == INVALID_HW_RING_ID) {
2853                        netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2854                                    data1);
2855                        goto async_event_process_exit;
2856                }
2857                rxr = bp->bnapi[grp_idx]->rx_ring;
2858                bnxt_sched_reset_rxr(bp, rxr);
2859                goto async_event_process_exit;
2860        }
2861        case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2862                struct bnxt_fw_health *fw_health = bp->fw_health;
2863
2864                netif_notice(bp, hw, bp->dev,
2865                             "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2866                             data1, data2);
2867                if (fw_health) {
2868                        fw_health->echo_req_data1 = data1;
2869                        fw_health->echo_req_data2 = data2;
2870                        set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2871                        break;
2872                }
2873                goto async_event_process_exit;
2874        }
2875        case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2876                bnxt_ptp_pps_event(bp, data1, data2);
2877                goto async_event_process_exit;
2878        }
2879        case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2880                if (bnxt_event_error_report(bp, data1, data2))
2881                        break;
2882                goto async_event_process_exit;
2883        }
2884        case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2885                switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2886                case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2887                        if (BNXT_PTP_USE_RTC(bp)) {
2888                                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2889                                unsigned long flags;
2890                                u64 ns;
2891
2892                                if (!ptp)
2893                                        goto async_event_process_exit;
2894
2895                                bnxt_ptp_update_current_time(bp);
2896                                ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2897                                       BNXT_PHC_BITS) | ptp->current_time);
2898                                write_seqlock_irqsave(&ptp->ptp_lock, flags);
2899                                bnxt_ptp_rtc_timecounter_init(ptp, ns);
2900                                write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2901                        }
2902                        break;
2903                }
2904                goto async_event_process_exit;
2905        }
2906        case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2907                u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2908
2909                hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2910                goto async_event_process_exit;
2911        }
2912        case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2913                u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2914                u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2915
2916                bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2917                goto async_event_process_exit;
2918        }
2919        default:
2920                goto async_event_process_exit;
2921        }
2922        __bnxt_queue_sp_work(bp);
2923async_event_process_exit:
2924        bnxt_ulp_async_events(bp, cmpl);
2925        return 0;
2926}
2927
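/* Dispatch an HWRM completion found on the completion ring:
 * HWRM_DONE completes the pending request token, FWD_REQ records the
 * originating VF and schedules the forwarded-request handler, and
 * async event completions are decoded by bnxt_async_event_process().
 */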
2928static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2929{
2930        u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2931        struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2932        struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2933                                (struct hwrm_fwd_req_cmpl *)txcmp;
2934
2935        switch (cmpl_type) {
2936        case CMPL_BASE_TYPE_HWRM_DONE:
2937                seq_id = le16_to_cpu(h_cmpl->sequence_id);
2938                hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2939                break;
2940
2941        case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2942                vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2943
2944                if ((vf_id < bp->pf.first_vf_id) ||
2945                    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2946                        netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2947                                   vf_id);
2948                        return -EINVAL;
2949                }
2950
2951                set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2952                bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2953                break;
2954
2955        case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2956                bnxt_async_event_process(bp,
2957                                         (struct hwrm_async_event_cmpl *)txcmp);
2958                break;
2959
2960        default:
2961                break;
2962        }
2963
2964        return 0;
2965}
2966
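/* The default VNIC is usable once firmware has assigned it a valid id
 * and a non-zero MRU.
 */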
2967static bool bnxt_vnic_is_active(struct bnxt *bp)
2968{
2969        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2970
2971        return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2972}
2973
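/* MSI-X handler: count the event, prefetch the next completion
 * descriptor and hand the rest of the work to NAPI.
 */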
2974static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2975{
2976        struct bnxt_napi *bnapi = dev_instance;
2977        struct bnxt *bp = bnapi->bp;
2978        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2979        u32 cons = RING_CMP(cpr->cp_raw_cons);
2980
2981        cpr->event_ctr++;
2982        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2983        napi_schedule(&bnapi->napi);
2984        return IRQ_HANDLED;
2985}
2986
2987static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2988{
2989        u32 raw_cons = cpr->cp_raw_cons;
2990        u16 cons = RING_CMP(raw_cons);
2991        struct tx_cmp *txcmp;
2992
2993        txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2994
2995        return TX_CMP_VALID(txcmp, raw_cons);
2996}
2997
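/* Core completion ring poll loop.  Consume valid entries until the
 * budget is exhausted or the ring runs dry: reclaim TX completions,
 * receive packets (or discard them when called with zero budget) and
 * pass HWRM completions to bnxt_hwrm_handler().  Doorbell updates are
 * accumulated in bnapi->events and rung later by
 * __bnxt_poll_work_done().
 */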
2998static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2999                            int budget)
3000{
3001        struct bnxt_napi *bnapi = cpr->bnapi;
3002        u32 raw_cons = cpr->cp_raw_cons;
3003        bool flush_xdp = false;
3004        u32 cons;
3005        int rx_pkts = 0;
3006        u8 event = 0;
3007        struct tx_cmp *txcmp;
3008
3009        cpr->has_more_work = 0;
3010        cpr->had_work_done = 1;
3011        while (1) {
3012                u8 cmp_type;
3013                int rc;
3014
3015                cons = RING_CMP(raw_cons);
3016                txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3017
3018                if (!TX_CMP_VALID(txcmp, raw_cons))
3019                        break;
3020
3021                /* The valid test of the entry must be done first before
3022                 * reading any further.
3023                 */
3024                dma_rmb();
3025                cmp_type = TX_CMP_TYPE(txcmp);
3026                if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3027                    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3028                        u32 opaque = txcmp->tx_cmp_opaque;
3029                        struct bnxt_tx_ring_info *txr;
3030                        u16 tx_freed;
3031
3032                        txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3033                        event |= BNXT_TX_CMP_EVENT;
3034                        if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3035                                txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3036                        else
3037                                txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3038                        tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3039                                   bp->tx_ring_mask;
3040                        /* return full budget so NAPI will complete. */
3041                        if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3042                                rx_pkts = budget;
3043                                raw_cons = NEXT_RAW_CMP(raw_cons);
3044                                if (budget)
3045                                        cpr->has_more_work = 1;
3046                                break;
3047                        }
3048                } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3049                        bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3050                } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3051                           cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3052                        if (likely(budget))
3053                                rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3054                        else
3055                                rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3056                                                           &event);
3057                        if (event & BNXT_REDIRECT_EVENT)
3058                                flush_xdp = true;
3059                        if (likely(rc >= 0))
3060                                rx_pkts += rc;
3061                        /* Increment rx_pkts when rc is -ENOMEM to count towards
3062                         * the NAPI budget.  Otherwise, we may potentially loop
3063                         * here forever if we consistently cannot allocate
3064                         * buffers.
3065                         */
3066                        else if (rc == -ENOMEM && budget)
3067                                rx_pkts++;
3068                        else if (rc == -EBUSY)  /* partial completion */
3069                                break;
3070                } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3071                                    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3072                                    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3073                        bnxt_hwrm_handler(bp, txcmp);
3074                }
3075                raw_cons = NEXT_RAW_CMP(raw_cons);
3076
3077                if (rx_pkts && rx_pkts == budget) {
3078                        cpr->has_more_work = 1;
3079                        break;
3080                }
3081        }
3082
3083        if (flush_xdp) {
3084                xdp_do_flush();
3085                event &= ~BNXT_REDIRECT_EVENT;
3086        }
3087
3088        if (event & BNXT_TX_EVENT) {
3089                struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3090                u16 prod = txr->tx_prod;
3091
3092                /* Sync BD data before updating doorbell */
3093                wmb();
3094
3095                bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3096                event &= ~BNXT_TX_EVENT;
3097        }
3098
3099        cpr->cp_raw_cons = raw_cons;
3100        bnapi->events |= event;
3101        return rx_pkts;
3102}
3103
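/* Ring the doorbells deferred from __bnxt_poll_work(): free the
 * buffers of completed TX packets and publish new RX and aggregation
 * buffers to the hardware.
 */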
3104static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3105                                  int budget)
3106{
3107        if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3108                bnapi->tx_int(bp, bnapi, budget);
3109
3110        if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3111                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3112
3113                bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3114                bnapi->events &= ~BNXT_RX_EVENT;
3115        }
3116        if (bnapi->events & BNXT_AGG_EVENT) {
3117                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3118
3119                bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3120                bnapi->events &= ~BNXT_AGG_EVENT;
3121        }
3122}
3123
3124static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3125                          int budget)
3126{
3127        struct bnxt_napi *bnapi = cpr->bnapi;
3128        int rx_pkts;
3129
3130        rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3131
3132        /* ACK completion ring before freeing tx ring and producing new
3133         * buffers in rx/agg rings to prevent overflowing the completion
3134         * ring.
3135         */
3136        bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3137
3138        __bnxt_poll_work_done(bp, bnapi, budget);
3139        return rx_pkts;
3140}
3141
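/* NAPI poll for the Nitro A0 special ring.  RX completions seen here
 * are forced to an error status so that the buffers are recycled
 * instead of being passed up the stack; only HWRM_DONE completions
 * are otherwise expected.
 */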
3142static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3143{
3144        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3145        struct bnxt *bp = bnapi->bp;
3146        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3147        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3148        struct tx_cmp *txcmp;
3149        struct rx_cmp_ext *rxcmp1;
3150        u32 cp_cons, tmp_raw_cons;
3151        u32 raw_cons = cpr->cp_raw_cons;
3152        bool flush_xdp = false;
3153        u32 rx_pkts = 0;
3154        u8 event = 0;
3155
3156        while (1) {
3157                int rc;
3158
3159                cp_cons = RING_CMP(raw_cons);
3160                txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3161
3162                if (!TX_CMP_VALID(txcmp, raw_cons))
3163                        break;
3164
3165                /* The valid test of the entry must be done first before
3166                 * reading any further.
3167                 */
3168                dma_rmb();
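                /* Completion types 0x10 through 0x1f are RX completions */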
3169                if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3170                        tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3171                        cp_cons = RING_CMP(tmp_raw_cons);
3172                        rxcmp1 = (struct rx_cmp_ext *)
3173                          &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3174
3175                        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3176                                break;
3177
3178                        /* force an error to recycle the buffer */
3179                        rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3180                                cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3181
3182                        rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3183                        if (likely(rc == -EIO) && budget)
3184                                rx_pkts++;
3185                        else if (rc == -EBUSY)  /* partial completion */
3186                                break;
3187                        if (event & BNXT_REDIRECT_EVENT)
3188                                flush_xdp = true;
3189                } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3190                                    CMPL_BASE_TYPE_HWRM_DONE)) {
3191                        bnxt_hwrm_handler(bp, txcmp);
3192                } else {
3193                        netdev_err(bp->dev,
3194                                   "Invalid completion received on special ring\n");
3195                }
3196                raw_cons = NEXT_RAW_CMP(raw_cons);
3197
3198                if (rx_pkts == budget)
3199                        break;
3200        }
3201
3202        cpr->cp_raw_cons = raw_cons;
3203        BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3204        bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3205
3206        if (event & BNXT_AGG_EVENT)
3207                bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3208        if (flush_xdp)
3209                xdp_do_flush();
3210
3211        if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3212                napi_complete_done(napi, rx_pkts);
3213                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3214        }
3215        return rx_pkts;
3216}
3217
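/* NAPI poll for chips with one completion ring per MSI-X vector.
 * Poll until the budget is consumed or no work remains, then feed a
 * sample to DIM for adaptive interrupt moderation when enabled.
 */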
3218static int bnxt_poll(struct napi_struct *napi, int budget)
3219{
3220        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3221        struct bnxt *bp = bnapi->bp;
3222        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3223        int work_done = 0;
3224
3225        if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3226                napi_complete(napi);
3227                return 0;
3228        }
3229        while (1) {
3230                work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3231
3232                if (work_done >= budget) {
3233                        if (!budget)
3234                                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3235                        break;
3236                }
3237
3238                if (!bnxt_has_work(bp, cpr)) {
3239                        if (napi_complete_done(napi, work_done))
3240                                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3241                        break;
3242                }
3243        }
3244        if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3245                struct dim_sample dim_sample = {};
3246
3247                dim_update_sample(cpr->event_ctr,
3248                                  cpr->rx_packets,
3249                                  cpr->rx_bytes,
3250                                  &dim_sample);
3251                net_dim(&cpr->dim, &dim_sample);
3252        }
3253        return work_done;
3254}
3255
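/* Re-poll the completion rings that have seen an NQE notification.
 * Used by bnxt_poll_p5() to resume work left over when the previous
 * poll ran out of budget.
 */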
3256static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3257{
3258        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3259        int i, work_done = 0;
3260
3261        for (i = 0; i < cpr->cp_ring_count; i++) {
3262                struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3263
3264                if (cpr2->had_nqe_notify) {
3265                        work_done += __bnxt_poll_work(bp, cpr2,
3266                                                      budget - work_done);
3267                        cpr->has_more_work |= cpr2->has_more_work;
3268                }
3269        }
3270        return work_done;
3271}
3272
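/* Write back the CQ doorbells of the rings that did work, either
 * re-arming them (DBR_TYPE_CQ_ARMALL) or only acking the consumed
 * entries (DBR_TYPE_CQ), then ring the deferred TX/RX doorbells.
 */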
3273static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3274                                 u64 dbr_type, int budget)
3275{
3276        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3277        int i;
3278
3279        for (i = 0; i < cpr->cp_ring_count; i++) {
3280                struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3281                struct bnxt_db_info *db;
3282
3283                if (cpr2->had_work_done) {
3284                        u32 tgl = 0;
3285
3286                        if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3287                                cpr2->had_nqe_notify = 0;
3288                                tgl = cpr2->toggle;
3289                        }
3290                        db = &cpr2->cp_db;
3291                        bnxt_writeq(bp,
3292                                    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3293                                    DB_RING_IDX(db, cpr2->cp_raw_cons),
3294                                    db->doorbell);
3295                        cpr2->had_work_done = 0;
3296                }
3297        }
3298        __bnxt_poll_work_done(bp, bnapi, budget);
3299}
3300
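/* NAPI poll for P5+ chips.  The vector services a notification queue
 * (NQ); each NQE points at the completion ring (cpr2) that has work,
 * and the NQ/CQ doorbells are only re-armed once all rings are idle.
 */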
3301static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3302{
3303        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3304        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3305        struct bnxt_cp_ring_info *cpr_rx;
3306        u32 raw_cons = cpr->cp_raw_cons;
3307        struct bnxt *bp = bnapi->bp;
3308        struct nqe_cn *nqcmp;
3309        int work_done = 0;
3310        u32 cons;
3311
3312        if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3313                napi_complete(napi);
3314                return 0;
3315        }
3316        if (cpr->has_more_work) {
3317                cpr->has_more_work = 0;
3318                work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3319        }
3320        while (1) {
3321                u16 type;
3322
3323                cons = RING_CMP(raw_cons);
3324                nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3325
3326                if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3327                        if (cpr->has_more_work)
3328                                break;
3329
3330                        __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3331                                             budget);
3332                        cpr->cp_raw_cons = raw_cons;
3333                        if (napi_complete_done(napi, work_done))
3334                                BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3335                                                  cpr->cp_raw_cons);
3336                        goto poll_done;
3337                }
3338
3339                /* The valid test of the entry must be done first before
3340                 * reading any further.
3341                 */
3342                dma_rmb();
3343
3344                type = le16_to_cpu(nqcmp->type);
3345                if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3346                        u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3347                        u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3348                        struct bnxt_cp_ring_info *cpr2;
3349
3350                        /* No more budget for RX work */
3351                        if (budget && work_done >= budget &&
3352                            cq_type == BNXT_NQ_HDL_TYPE_RX)
3353                                break;
3354
3355                        idx = BNXT_NQ_HDL_IDX(idx);
3356                        cpr2 = &cpr->cp_ring_arr[idx];
3357                        cpr2->had_nqe_notify = 1;
3358                        cpr2->toggle = NQE_CN_TOGGLE(type);
3359                        work_done += __bnxt_poll_work(bp, cpr2,
3360                                                      budget - work_done);
3361                        cpr->has_more_work |= cpr2->has_more_work;
3362                } else {
3363                        bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3364                }
3365                raw_cons = NEXT_RAW_CMP(raw_cons);
3366        }
3367        __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3368        if (raw_cons != cpr->cp_raw_cons) {
3369                cpr->cp_raw_cons = raw_cons;
3370                BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3371        }
3372poll_done:
3373        cpr_rx = &cpr->cp_ring_arr[0];
3374        if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3375            (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3376                struct dim_sample dim_sample = {};
3377
3378                dim_update_sample(cpr->event_ctr,
3379                                  cpr_rx->rx_packets,
3380                                  cpr_rx->rx_bytes,
3381                                  &dim_sample);
3382                net_dim(&cpr->dim, &dim_sample);
3383        }
3384        return work_done;
3385}
3386
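/* Release everything still queued on one TX ring: XDP_REDIRECT
 * frames, push-mode SKBs (which occupy two BDs and carry no DMA
 * mapping) and regular SKBs along with their fragment mappings.
 */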
3387static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3388                                       struct bnxt_tx_ring_info *txr, int idx)
3389{
3390        int i, max_idx;
3391        struct pci_dev *pdev = bp->pdev;
3392
3393        max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3394
3395        for (i = 0; i < max_idx;) {
3396                struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3397                struct sk_buff *skb;
3398                int j, last;
3399
3400                if (idx < bp->tx_nr_rings_xdp &&
3401                    tx_buf->action == XDP_REDIRECT) {
3402                        dma_unmap_single(&pdev->dev,
3403                                         dma_unmap_addr(tx_buf, mapping),
3404                                         dma_unmap_len(tx_buf, len),
3405                                         DMA_TO_DEVICE);
3406                        xdp_return_frame(tx_buf->xdpf);
3407                        tx_buf->action = 0;
3408                        tx_buf->xdpf = NULL;
3409                        i++;
3410                        continue;
3411                }
3412
3413                skb = tx_buf->skb;
3414                if (!skb) {
3415                        i++;
3416                        continue;
3417                }
3418
3419                tx_buf->skb = NULL;
3420
3421                if (tx_buf->is_push) {
3422                        dev_kfree_skb(skb);
3423                        i += 2;
3424                        continue;
3425                }
3426
3427                dma_unmap_single(&pdev->dev,
3428                                 dma_unmap_addr(tx_buf, mapping),
3429                                 skb_headlen(skb),
3430                                 DMA_TO_DEVICE);
3431
3432                last = tx_buf->nr_frags;
3433                i += 2;
3434                for (j = 0; j < last; j++, i++) {
3435                        int ring_idx = i & bp->tx_ring_mask;
3436                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3437
3438                        tx_buf = &txr->tx_buf_ring[ring_idx];
3439                        netmem_dma_unmap_page_attrs(&pdev->dev,
3440                                                    dma_unmap_addr(tx_buf,
3441                                                                   mapping),
3442                                                    skb_frag_size(frag),
3443                                                    DMA_TO_DEVICE, 0);
3444                }
3445                dev_kfree_skb(skb);
3446        }
3447        netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3448}
3449
3450static void bnxt_free_tx_skbs(struct bnxt *bp)
3451{
3452        int i;
3453
3454        if (!bp->tx_ring)
3455                return;
3456
3457        for (i = 0; i < bp->tx_nr_rings; i++) {
3458                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3459
3460                if (!txr->tx_buf_ring)
3461                        continue;
3462
3463                bnxt_free_one_tx_ring_skbs(bp, txr, i);
3464        }
3465
3466        if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3467                bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3468}
3469
3470static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3471{
3472        int i, max_idx;
3473
3474        max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3475
3476        for (i = 0; i < max_idx; i++) {
3477                struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3478                void *data = rx_buf->data;
3479
3480                if (!data)
3481                        continue;
3482
3483                rx_buf->data = NULL;
3484                if (BNXT_RX_PAGE_MODE(bp))
3485                        page_pool_recycle_direct(rxr->page_pool, data);
3486                else
3487                        page_pool_free_va(rxr->head_pool, data, true);
3488        }
3489}
3490
3491static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3492{
3493        int i, max_idx;
3494
3495        max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3496
3497        for (i = 0; i < max_idx; i++) {
3498                struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3499                netmem_ref netmem = rx_agg_buf->netmem;
3500
3501                if (!netmem)
3502                        continue;
3503
3504                rx_agg_buf->netmem = 0;
3505                __clear_bit(i, rxr->rx_agg_bmap);
3506
3507                page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3508        }
3509}
3510
3511static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3512                                        struct bnxt_rx_ring_info *rxr)
3513{
3514        int i;
3515
3516        for (i = 0; i < bp->max_tpa; i++) {
3517                struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3518                u8 *data = tpa_info->data;
3519
3520                if (!data)
3521                        continue;
3522
3523                tpa_info->data = NULL;
3524                page_pool_free_va(rxr->head_pool, data, false);
3525        }
3526}
3527
3528static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3529                                       struct bnxt_rx_ring_info *rxr)
3530{
3531        struct bnxt_tpa_idx_map *map;
3532
3533        if (!rxr->rx_tpa)
3534                goto skip_rx_tpa_free;
3535
3536        bnxt_free_one_tpa_info_data(bp, rxr);
3537
3538skip_rx_tpa_free:
3539        if (!rxr->rx_buf_ring)
3540                goto skip_rx_buf_free;
3541
3542        bnxt_free_one_rx_ring(bp, rxr);
3543
3544skip_rx_buf_free:
3545        if (!rxr->rx_agg_ring)
3546                goto skip_rx_agg_free;
3547
3548        bnxt_free_one_rx_agg_ring(bp, rxr);
3549
3550skip_rx_agg_free:
3551        map = rxr->rx_tpa_idx_map;
3552        if (map)
3553                memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3554}
3555
3556static void bnxt_free_rx_skbs(struct bnxt *bp)
3557{
3558        int i;
3559
3560        if (!bp->rx_ring)
3561                return;
3562
3563        for (i = 0; i < bp->rx_nr_rings; i++)
3564                bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3565}
3566
3567static void bnxt_free_skbs(struct bnxt *bp)
3568{
3569        bnxt_free_tx_skbs(bp);
3570        bnxt_free_rx_skbs(bp);
3571}
3572
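/* Initialize context memory to the value requested by firmware,
 * either across the whole block or at a fixed offset within each
 * entry.
 */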
3573static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3574{
3575        u8 init_val = ctxm->init_value;
3576        u16 offset = ctxm->init_offset;
3577        u8 *p2 = p;
3578        int i;
3579
3580        if (!init_val)
3581                return;
3582        if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3583                memset(p, init_val, len);
3584                return;
3585        }
3586        for (i = 0; i < len; i += ctxm->entry_size)
3587                *(p2 + i + offset) = init_val;
3588}
3589
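/* Copy the [head, tail) region of a paged ring into buf + offset,
 * clamped to the pages available from the starting page.  When buf is
 * NULL, only compute the length that would be copied.  Returns the
 * number of bytes covered.
 */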
3590static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3591                               void *buf, size_t offset, size_t head,
3592                               size_t tail)
3593{
3594        int i, head_page, start_idx, source_offset;
3595        size_t len, rem_len, total_len, max_bytes;
3596
3597        head_page = head / rmem->page_size;
3598        source_offset = head % rmem->page_size;
3599        total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3600        if (!total_len)
3601                total_len = MAX_CTX_BYTES;
3602        start_idx = head_page % MAX_CTX_PAGES;
3603        max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3604                    source_offset;
3605        total_len = min(total_len, max_bytes);
3606        rem_len = total_len;
3607
3608        for (i = start_idx; rem_len; i++, source_offset = 0) {
3609                len = min((size_t)(rmem->page_size - source_offset), rem_len);
3610                if (buf)
3611                        memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3612                               len);
3613                offset += len;
3614                rem_len -= len;
3615        }
3616        return total_len;
3617}
3618
3619static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3620{
3621        struct pci_dev *pdev = bp->pdev;
3622        int i;
3623
3624        if (!rmem->pg_arr)
3625                goto skip_pages;
3626
3627        for (i = 0; i < rmem->nr_pages; i++) {
3628                if (!rmem->pg_arr[i])
3629                        continue;
3630
3631                dma_free_coherent(&pdev->dev, rmem->page_size,
3632                                  rmem->pg_arr[i], rmem->dma_arr[i]);
3633
3634                rmem->pg_arr[i] = NULL;
3635        }
3636skip_pages:
3637        if (rmem->pg_tbl) {
3638                size_t pg_tbl_size = rmem->nr_pages * 8;
3639
3640                if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3641                        pg_tbl_size = rmem->page_size;
3642                dma_free_coherent(&pdev->dev, pg_tbl_size,
3643                                  rmem->pg_tbl, rmem->pg_tbl_map);
3644                rmem->pg_tbl = NULL;
3645        }
3646        if (rmem->vmem_size && *rmem->vmem) {
3647                vfree(*rmem->vmem);
3648                *rmem->vmem = NULL;
3649        }
3650}
3651
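/* Allocate the coherent DMA pages backing a ring, plus a page table
 * when the ring spans multiple pages or uses an indirection level.
 * Context memory is pre-initialized and the PTEs are tagged with
 * VALID/NEXT_TO_LAST/LAST bits as the chip requires.
 */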
3652static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3653{
3654        struct pci_dev *pdev = bp->pdev;
3655        u64 valid_bit = 0;
3656        int i;
3657
3658        if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3659                valid_bit = PTU_PTE_VALID;
3660        if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3661                size_t pg_tbl_size = rmem->nr_pages * 8;
3662
3663                if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3664                        pg_tbl_size = rmem->page_size;
3665                rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3666                                                  &rmem->pg_tbl_map,
3667                                                  GFP_KERNEL);
3668                if (!rmem->pg_tbl)
3669                        return -ENOMEM;
3670        }
3671
3672        for (i = 0; i < rmem->nr_pages; i++) {
3673                u64 extra_bits = valid_bit;
3674
3675                rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3676                                                     rmem->page_size,
3677                                                     &rmem->dma_arr[i],
3678                                                     GFP_KERNEL);
3679                if (!rmem->pg_arr[i])
3680                        return -ENOMEM;
3681
3682                if (rmem->ctx_mem)
3683                        bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3684                                          rmem->page_size);
3685                if (rmem->nr_pages > 1 || rmem->depth > 0) {
3686                        if (i == rmem->nr_pages - 2 &&
3687                            (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3688                                extra_bits |= PTU_PTE_NEXT_TO_LAST;
3689                        else if (i == rmem->nr_pages - 1 &&
3690                                 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3691                                extra_bits |= PTU_PTE_LAST;
3692                        rmem->pg_tbl[i] =
3693                                cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3694                }
3695        }
3696
3697        if (rmem->vmem_size) {
3698                *rmem->vmem = vzalloc(rmem->vmem_size);
3699                if (!(*rmem->vmem))
3700                        return -ENOMEM;
3701        }
3702        return 0;
3703}
3704
3705static void bnxt_free_one_tpa_info(struct bnxt *bp,
3706                                   struct bnxt_rx_ring_info *rxr)
3707{
3708        int i;
3709
3710        kfree(rxr->rx_tpa_idx_map);
3711        rxr->rx_tpa_idx_map = NULL;
3712        if (rxr->rx_tpa) {
3713                for (i = 0; i < bp->max_tpa; i++) {
3714                        kfree(rxr->rx_tpa[i].agg_arr);
3715                        rxr->rx_tpa[i].agg_arr = NULL;
3716                }
3717        }
3718        kfree(rxr->rx_tpa);
3719        rxr->rx_tpa = NULL;
3720}
3721
3722static void bnxt_free_tpa_info(struct bnxt *bp)
3723{
3724        int i;
3725
3726        for (i = 0; i < bp->rx_nr_rings; i++) {
3727                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3728
3729                bnxt_free_one_tpa_info(bp, rxr);
3730        }
3731}
3732
3733static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3734                                   struct bnxt_rx_ring_info *rxr)
3735{
3736        struct rx_agg_cmp *agg;
3737        int i;
3738
3739        rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3740                              GFP_KERNEL);
3741        if (!rxr->rx_tpa)
3742                return -ENOMEM;
3743
3744        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3745                return 0;
3746        for (i = 0; i < bp->max_tpa; i++) {
3747                agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3748                if (!agg)
3749                        return -ENOMEM;
3750                rxr->rx_tpa[i].agg_arr = agg;
3751        }
3752        rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3753                                      GFP_KERNEL);
3754        if (!rxr->rx_tpa_idx_map)
3755                return -ENOMEM;
3756
3757        return 0;
3758}
3759
3760static int bnxt_alloc_tpa_info(struct bnxt *bp)
3761{
3762        int i, rc;
3763
3764        bp->max_tpa = MAX_TPA;
3765        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3766                if (!bp->max_tpa_v2)
3767                        return 0;
3768                bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3769        }
3770
3771        for (i = 0; i < bp->rx_nr_rings; i++) {
3772                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3773
3774                rc = bnxt_alloc_one_tpa_info(bp, rxr);
3775                if (rc)
3776                        return rc;
3777        }
3778        return 0;
3779}
3780
3781static void bnxt_free_rx_rings(struct bnxt *bp)
3782{
3783        int i;
3784
3785        if (!bp->rx_ring)
3786                return;
3787
3788        bnxt_free_tpa_info(bp);
3789        for (i = 0; i < bp->rx_nr_rings; i++) {
3790                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3791                struct bnxt_ring_struct *ring;
3792
3793                if (rxr->xdp_prog)
3794                        bpf_prog_put(rxr->xdp_prog);
3795
3796                if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3797                        xdp_rxq_info_unreg(&rxr->xdp_rxq);
3798
3799                page_pool_destroy(rxr->page_pool);
3800                if (bnxt_separate_head_pool(rxr))
3801                        page_pool_destroy(rxr->head_pool);
3802                rxr->page_pool = rxr->head_pool = NULL;
3803
3804                kfree(rxr->rx_agg_bmap);
3805                rxr->rx_agg_bmap = NULL;
3806
3807                ring = &rxr->rx_ring_struct;
3808                bnxt_free_ring(bp, &ring->ring_mem);
3809
3810                ring = &rxr->rx_agg_ring_struct;
3811                bnxt_free_ring(bp, &ring->ring_mem);
3812        }
3813}
3814
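/* Create the page pool(s) backing an RX ring.  A separate pool for
 * packet headers is created when the main pool may return unreadable
 * netmem (e.g. device memory), since headers must stay CPU
 * accessible.
 */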
3815static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3816                                   struct bnxt_rx_ring_info *rxr,
3817                                   int numa_node)
3818{
3819        const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
3820        const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3821        struct page_pool_params pp = { 0 };
3822        struct page_pool *pool;
3823
3824        pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
3825        if (BNXT_RX_PAGE_MODE(bp))
3826                pp.pool_size += bp->rx_ring_size / rx_size_fac;
3827        pp.nid = numa_node;
3828        pp.netdev = bp->dev;
3829        pp.dev = &bp->pdev->dev;
3830        pp.dma_dir = bp->rx_dir;
3831        pp.max_len = PAGE_SIZE;
3832        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3833                   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3834        pp.queue_idx = rxr->bnapi->index;
3835
3836        pool = page_pool_create(&pp);
3837        if (IS_ERR(pool))
3838                return PTR_ERR(pool);
3839        rxr->page_pool = pool;
3840
3841        rxr->need_head_pool = page_pool_is_unreadable(pool);
3842        if (bnxt_separate_head_pool(rxr)) {
3843                pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3844                pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3845                pool = page_pool_create(&pp);
3846                if (IS_ERR(pool))
3847                        goto err_destroy_pp;
3848        }
3849        rxr->head_pool = pool;
3850
3851        return 0;
3852
3853err_destroy_pp:
3854        page_pool_destroy(rxr->page_pool);
3855        rxr->page_pool = NULL;
3856        return PTR_ERR(pool);
3857}
3858
3859static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3860{
3861        page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3862        page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3863}
3864
3865static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3866{
3867        u16 mem_size;
3868
3869        rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3870        mem_size = rxr->rx_agg_bmap_size / 8;
3871        rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3872        if (!rxr->rx_agg_bmap)
3873                return -ENOMEM;
3874
3875        return 0;
3876}
3877
3878static int bnxt_alloc_rx_rings(struct bnxt *bp)
3879{
3880        int numa_node = dev_to_node(&bp->pdev->dev);
3881        int i, rc = 0, agg_rings = 0, cpu;
3882
3883        if (!bp->rx_ring)
3884                return -ENOMEM;
3885
3886        if (bp->flags & BNXT_FLAG_AGG_RINGS)
3887                agg_rings = 1;
3888
3889        for (i = 0; i < bp->rx_nr_rings; i++) {
3890                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3891                struct bnxt_ring_struct *ring;
3892                int cpu_node;
3893
3894                ring = &rxr->rx_ring_struct;
3895
3896                cpu = cpumask_local_spread(i, numa_node);
3897                cpu_node = cpu_to_node(cpu);
3898                netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3899                           i, cpu_node);
3900                rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3901                if (rc)
3902                        return rc;
3903                bnxt_enable_rx_page_pool(rxr);
3904
3905                rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3906                if (rc < 0)
3907                        return rc;
3908
3909                rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3910                                                MEM_TYPE_PAGE_POOL,
3911                                                rxr->page_pool);
3912                if (rc) {
3913                        xdp_rxq_info_unreg(&rxr->xdp_rxq);
3914                        return rc;
3915                }
3916
3917                rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3918                if (rc)
3919                        return rc;
3920
3921                ring->grp_idx = i;
3922                if (agg_rings) {
3923                        ring = &rxr->rx_agg_ring_struct;
3924                        rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3925                        if (rc)
3926                                return rc;
3927
3928                        ring->grp_idx = i;
3929                        rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3930                        if (rc)
3931                                return rc;
3932                }
3933        }
3934        if (bp->flags & BNXT_FLAG_TPA)
3935                rc = bnxt_alloc_tpa_info(bp);
3936        return rc;
3937}
3938
3939static void bnxt_free_tx_rings(struct bnxt *bp)
3940{
3941        int i;
3942        struct pci_dev *pdev = bp->pdev;
3943
3944        if (!bp->tx_ring)
3945                return;
3946
3947        for (i = 0; i < bp->tx_nr_rings; i++) {
3948                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3949                struct bnxt_ring_struct *ring;
3950
3951                if (txr->tx_push) {
3952                        dma_free_coherent(&pdev->dev, bp->tx_push_size,
3953                                          txr->tx_push, txr->tx_push_mapping);
3954                        txr->tx_push = NULL;
3955                }
3956
3957                ring = &txr->tx_ring_struct;
3958
3959                bnxt_free_ring(bp, &ring->ring_mem);
3960        }
3961}
3962
3963#define BNXT_TC_TO_RING_BASE(bp, tc)    \
3964        ((tc) * (bp)->tx_nr_rings_per_tc)
3965
3966#define BNXT_RING_TO_TC_OFF(bp, tx)     \
3967        ((tx) % (bp)->tx_nr_rings_per_tc)
3968
3969#define BNXT_RING_TO_TC(bp, tx)         \
3970        ((tx) / (bp)->tx_nr_rings_per_tc)
3971
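/* Allocate TX ring memory and, when TX push is enabled, one coherent
 * buffer per ring to back push-mode transmits.  The push threshold is
 * dropped entirely if the resulting buffer would exceed 256 bytes.
 */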
3972static int bnxt_alloc_tx_rings(struct bnxt *bp)
3973{
3974        int i, j, rc;
3975        struct pci_dev *pdev = bp->pdev;
3976
3977        bp->tx_push_size = 0;
3978        if (bp->tx_push_thresh) {
3979                int push_size;
3980
3981                push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3982                                        bp->tx_push_thresh);
3983
3984                if (push_size > 256) {
3985                        push_size = 0;
3986                        bp->tx_push_thresh = 0;
3987                }
3988
3989                bp->tx_push_size = push_size;
3990        }
3991
3992        for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3993                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3994                struct bnxt_ring_struct *ring;
3995                u8 qidx;
3996
3997                ring = &txr->tx_ring_struct;
3998
3999                rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4000                if (rc)
4001                        return rc;
4002
4003                ring->grp_idx = txr->bnapi->index;
4004                if (bp->tx_push_size) {
4005                        dma_addr_t mapping;
4006
4007                        /* One pre-allocated DMA buffer to back up
4008                         * TX push operations
4009                         */
4010                        txr->tx_push = dma_alloc_coherent(&pdev->dev,
4011                                                bp->tx_push_size,
4012                                                &txr->tx_push_mapping,
4013                                                GFP_KERNEL);
4014
4015                        if (!txr->tx_push)
4016                                return -ENOMEM;
4017
4018                        mapping = txr->tx_push_mapping +
4019                                sizeof(struct tx_push_bd);
4020                        txr->data_mapping = cpu_to_le64(mapping);
4021                }
4022                qidx = bp->tc_to_qidx[j];
4023                ring->queue_id = bp->q_info[qidx].queue_id;
4024                spin_lock_init(&txr->xdp_tx_lock);
4025                if (i < bp->tx_nr_rings_xdp)
4026                        continue;
4027                if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4028                        j++;
4029        }
4030        return 0;
4031}
4032
4033static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4034{
4035        struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4036
4037        kfree(cpr->cp_desc_ring);
4038        cpr->cp_desc_ring = NULL;
4039        ring->ring_mem.pg_arr = NULL;
4040        kfree(cpr->cp_desc_mapping);
4041        cpr->cp_desc_mapping = NULL;
4042        ring->ring_mem.dma_arr = NULL;
4043}
4044
4045static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4046{
4047        cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
4048        if (!cpr->cp_desc_ring)
4049                return -ENOMEM;
4050        cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
4051                                       GFP_KERNEL);
4052        if (!cpr->cp_desc_mapping)
4053                return -ENOMEM;
4054        return 0;
4055}
4056
4057static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4058{
4059        int i;
4060
4061        if (!bp->bnapi)
4062                return;
4063        for (i = 0; i < bp->cp_nr_rings; i++) {
4064                struct bnxt_napi *bnapi = bp->bnapi[i];
4065
4066                if (!bnapi)
4067                        continue;
4068                bnxt_free_cp_arrays(&bnapi->cp_ring);
4069        }
4070}
4071
4072static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4073{
4074        int i, n = bp->cp_nr_pages;
4075
4076        for (i = 0; i < bp->cp_nr_rings; i++) {
4077                struct bnxt_napi *bnapi = bp->bnapi[i];
4078                int rc;
4079
4080                if (!bnapi)
4081                        continue;
4082                rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4083                if (rc)
4084                        return rc;
4085        }
4086        return 0;
4087}
4088
4089static void bnxt_free_cp_rings(struct bnxt *bp)
4090{
4091        int i;
4092
4093        if (!bp->bnapi)
4094                return;
4095
4096        for (i = 0; i < bp->cp_nr_rings; i++) {
4097                struct bnxt_napi *bnapi = bp->bnapi[i];
4098                struct bnxt_cp_ring_info *cpr;
4099                struct bnxt_ring_struct *ring;
4100                int j;
4101
4102                if (!bnapi)
4103                        continue;
4104
4105                cpr = &bnapi->cp_ring;
4106                ring = &cpr->cp_ring_struct;
4107
4108                bnxt_free_ring(bp, &ring->ring_mem);
4109
4110                if (!cpr->cp_ring_arr)
4111                        continue;
4112
4113                for (j = 0; j < cpr->cp_ring_count; j++) {
4114                        struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4115
4116                        ring = &cpr2->cp_ring_struct;
4117                        bnxt_free_ring(bp, &ring->ring_mem);
4118                        bnxt_free_cp_arrays(cpr2);
4119                }
4120                kfree(cpr->cp_ring_arr);
4121                cpr->cp_ring_arr = NULL;
4122                cpr->cp_ring_count = 0;
4123        }
4124}
4125
4126static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4127                                  struct bnxt_cp_ring_info *cpr)
4128{
4129        struct bnxt_ring_mem_info *rmem;
4130        struct bnxt_ring_struct *ring;
4131        int rc;
4132
4133        rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4134        if (rc) {
4135                bnxt_free_cp_arrays(cpr);
4136                return -ENOMEM;
4137        }
4138        ring = &cpr->cp_ring_struct;
4139        rmem = &ring->ring_mem;
4140        rmem->nr_pages = bp->cp_nr_pages;
4141        rmem->page_size = HW_CMPD_RING_SIZE;
4142        rmem->pg_arr = (void **)cpr->cp_desc_ring;
4143        rmem->dma_arr = cpr->cp_desc_mapping;
4144        rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4145        rc = bnxt_alloc_ring(bp, rmem);
4146        if (rc) {
4147                bnxt_free_ring(bp, rmem);
4148                bnxt_free_cp_arrays(cpr);
4149        }
4150        return rc;
4151}
4152
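    /* On P5_PLUS chips each bnxt_napi owns a notification queue (the
     * top-level cp_ring) plus an array of completion sub-rings hanging
     * off it: one RX completion ring when the index maps to an RX ring,
     * and one TX completion ring per traffic class (or a single one for
     * XDP) when it maps to TX.  Older chips use the top-level ring
     * directly and skip the sub-ring allocation.
     */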
4153static int bnxt_alloc_cp_rings(struct bnxt *bp)
4154{
4155        bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4156        int i, j, rc, ulp_msix;
4157        int tcs = bp->num_tc;
4158
4159        if (!tcs)
4160                tcs = 1;
4161        ulp_msix = bnxt_get_ulp_msix_num(bp);
4162        for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4163                struct bnxt_napi *bnapi = bp->bnapi[i];
4164                struct bnxt_cp_ring_info *cpr, *cpr2;
4165                struct bnxt_ring_struct *ring;
4166                int cp_count = 0, k;
4167                int rx = 0, tx = 0;
4168
4169                if (!bnapi)
4170                        continue;
4171
4172                cpr = &bnapi->cp_ring;
4173                cpr->bnapi = bnapi;
4174                ring = &cpr->cp_ring_struct;
4175
4176                rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4177                if (rc)
4178                        return rc;
4179
4180                ring->map_idx = ulp_msix + i;
4181
4182                if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4183                        continue;
4184
4185                if (i < bp->rx_nr_rings) {
4186                        cp_count++;
4187                        rx = 1;
4188                }
4189                if (i < bp->tx_nr_rings_xdp) {
4190                        cp_count++;
4191                        tx = 1;
4192                } else if ((sh && i < bp->tx_nr_rings) ||
4193                         (!sh && i >= bp->rx_nr_rings)) {
4194                        cp_count += tcs;
4195                        tx = 1;
4196                }
4197
4198                cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
4199                                           GFP_KERNEL);
4200                if (!cpr->cp_ring_arr)
4201                        return -ENOMEM;
4202                cpr->cp_ring_count = cp_count;
4203
4204                for (k = 0; k < cp_count; k++) {
4205                        cpr2 = &cpr->cp_ring_arr[k];
4206                        rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4207                        if (rc)
4208                                return rc;
4209                        cpr2->bnapi = bnapi;
4210                        cpr2->sw_stats = cpr->sw_stats;
4211                        cpr2->cp_idx = k;
4212                        if (!k && rx) {
4213                                bp->rx_ring[i].rx_cpr = cpr2;
4214                                cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4215                        } else {
4216                                int n, tc = k - rx;
4217
4218                                n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4219                                bp->tx_ring[n].tx_cpr = cpr2;
4220                                cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4221                        }
4222                }
4223                if (tx)
4224                        j++;
4225        }
4226        return 0;
4227}
4228
4229static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4230                                     struct bnxt_rx_ring_info *rxr)
4231{
4232        struct bnxt_ring_mem_info *rmem;
4233        struct bnxt_ring_struct *ring;
4234
4235        ring = &rxr->rx_ring_struct;
4236        rmem = &ring->ring_mem;
4237        rmem->nr_pages = bp->rx_nr_pages;
4238        rmem->page_size = HW_RXBD_RING_SIZE;
4239        rmem->pg_arr = (void **)rxr->rx_desc_ring;
4240        rmem->dma_arr = rxr->rx_desc_mapping;
4241        rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4242        rmem->vmem = (void **)&rxr->rx_buf_ring;
4243
4244        ring = &rxr->rx_agg_ring_struct;
4245        rmem = &ring->ring_mem;
4246        rmem->nr_pages = bp->rx_agg_nr_pages;
4247        rmem->page_size = HW_RXBD_RING_SIZE;
4248        rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4249        rmem->dma_arr = rxr->rx_agg_desc_mapping;
4250        rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4251        rmem->vmem = (void **)&rxr->rx_agg_ring;
4252}
4253
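    /* Forget, without freeing, the page pools and all descriptor page
     * pointers so that the ring structs can be re-initialized.  This
     * assumes ownership of the old memory has already been transferred
     * elsewhere, e.g. by a queue restart.
     */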
4254static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4255                                      struct bnxt_rx_ring_info *rxr)
4256{
4257        struct bnxt_ring_mem_info *rmem;
4258        struct bnxt_ring_struct *ring;
4259        int i;
4260
4261        rxr->page_pool->p.napi = NULL;
4262        rxr->page_pool = NULL;
4263        rxr->head_pool->p.napi = NULL;
4264        rxr->head_pool = NULL;
4265        memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4266
4267        ring = &rxr->rx_ring_struct;
4268        rmem = &ring->ring_mem;
4269        rmem->pg_tbl = NULL;
4270        rmem->pg_tbl_map = 0;
4271        for (i = 0; i < rmem->nr_pages; i++) {
4272                rmem->pg_arr[i] = NULL;
4273                rmem->dma_arr[i] = 0;
4274        }
4275        *rmem->vmem = NULL;
4276
4277        ring = &rxr->rx_agg_ring_struct;
4278        rmem = &ring->ring_mem;
4279        rmem->pg_tbl = NULL;
4280        rmem->pg_tbl_map = 0;
4281        for (i = 0; i < rmem->nr_pages; i++) {
4282                rmem->pg_arr[i] = NULL;
4283                rmem->dma_arr[i] = 0;
4284        }
4285        *rmem->vmem = NULL;
4286}
4287
4288static void bnxt_init_ring_struct(struct bnxt *bp)
4289{
4290        int i, j;
4291
4292        for (i = 0; i < bp->cp_nr_rings; i++) {
4293                struct bnxt_napi *bnapi = bp->bnapi[i];
4294                struct bnxt_ring_mem_info *rmem;
4295                struct bnxt_cp_ring_info *cpr;
4296                struct bnxt_rx_ring_info *rxr;
4297                struct bnxt_tx_ring_info *txr;
4298                struct bnxt_ring_struct *ring;
4299
4300                if (!bnapi)
4301                        continue;
4302
4303                cpr = &bnapi->cp_ring;
4304                ring = &cpr->cp_ring_struct;
4305                rmem = &ring->ring_mem;
4306                rmem->nr_pages = bp->cp_nr_pages;
4307                rmem->page_size = HW_CMPD_RING_SIZE;
4308                rmem->pg_arr = (void **)cpr->cp_desc_ring;
4309                rmem->dma_arr = cpr->cp_desc_mapping;
4310                rmem->vmem_size = 0;
4311
4312                rxr = bnapi->rx_ring;
4313                if (!rxr)
4314                        goto skip_rx;
4315
4316                ring = &rxr->rx_ring_struct;
4317                rmem = &ring->ring_mem;
4318                rmem->nr_pages = bp->rx_nr_pages;
4319                rmem->page_size = HW_RXBD_RING_SIZE;
4320                rmem->pg_arr = (void **)rxr->rx_desc_ring;
4321                rmem->dma_arr = rxr->rx_desc_mapping;
4322                rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4323                rmem->vmem = (void **)&rxr->rx_buf_ring;
4324
4325                ring = &rxr->rx_agg_ring_struct;
4326                rmem = &ring->ring_mem;
4327                rmem->nr_pages = bp->rx_agg_nr_pages;
4328                rmem->page_size = HW_RXBD_RING_SIZE;
4329                rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4330                rmem->dma_arr = rxr->rx_agg_desc_mapping;
4331                rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4332                rmem->vmem = (void **)&rxr->rx_agg_ring;
4333
4334skip_rx:
4335                bnxt_for_each_napi_tx(j, bnapi, txr) {
4336                        ring = &txr->tx_ring_struct;
4337                        rmem = &ring->ring_mem;
4338                        rmem->nr_pages = bp->tx_nr_pages;
4339                        rmem->page_size = HW_TXBD_RING_SIZE;
4340                        rmem->pg_arr = (void **)txr->tx_desc_ring;
4341                        rmem->dma_arr = txr->tx_desc_mapping;
4342                        rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4343                        rmem->vmem = (void **)&txr->tx_buf_ring;
4344                }
4345        }
4346}
4347
4348static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4349{
4350        int i;
4351        u32 prod;
4352        struct rx_bd **rx_buf_ring;
4353
4354        rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4355        for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4356                int j;
4357                struct rx_bd *rxbd;
4358
4359                rxbd = rx_buf_ring[i];
4360                if (!rxbd)
4361                        continue;
4362
4363                for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4364                        rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4365                        rxbd->rx_bd_opaque = prod;
4366                }
4367        }
4368}
4369
4370static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4371                                       struct bnxt_rx_ring_info *rxr,
4372                                       int ring_nr)
4373{
4374        u32 prod;
4375        int i;
4376
4377        prod = rxr->rx_prod;
4378        for (i = 0; i < bp->rx_ring_size; i++) {
4379                if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4380                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4381                                    ring_nr, i, bp->rx_ring_size);
4382                        break;
4383                }
4384                prod = NEXT_RX(prod);
4385        }
4386        rxr->rx_prod = prod;
4387}
4388
4389static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4390                                          struct bnxt_rx_ring_info *rxr,
4391                                          int ring_nr)
4392{
4393        u32 prod;
4394        int i;
4395
4396        prod = rxr->rx_agg_prod;
4397        for (i = 0; i < bp->rx_agg_ring_size; i++) {
4398                if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4399                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4400                                    ring_nr, i, bp->rx_agg_ring_size);
4401                        break;
4402                }
4403                prod = NEXT_RX_AGG(prod);
4404        }
4405        rxr->rx_agg_prod = prod;
4406}
4407
4408static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4409                                        struct bnxt_rx_ring_info *rxr)
4410{
4411        dma_addr_t mapping;
4412        u8 *data;
4413        int i;
4414
4415        for (i = 0; i < bp->max_tpa; i++) {
4416                data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4417                                            GFP_KERNEL);
4418                if (!data)
4419                        return -ENOMEM;
4420
4421                rxr->rx_tpa[i].data = data;
4422                rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4423                rxr->rx_tpa[i].mapping = mapping;
4424        }
4425
4426        return 0;
4427}
4428
4429static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4430{
4431        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4432        int rc;
4433
4434        bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4435
4436        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4437                return 0;
4438
4439        bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4440
4441        if (rxr->rx_tpa) {
4442                rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4443                if (rc)
4444                        return rc;
4445        }
4446        return 0;
4447}
4448
4449static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4450                                       struct bnxt_rx_ring_info *rxr)
4451{
4452        struct bnxt_ring_struct *ring;
4453        u32 type;
4454
4455        type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4456                RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4457
4458        if (NET_IP_ALIGN == 2)
4459                type |= RX_BD_FLAGS_SOP;
4460
4461        ring = &rxr->rx_ring_struct;
4462        bnxt_init_rxbd_pages(ring, type);
4463        ring->fw_ring_id = INVALID_HW_RING_ID;
4464}
4465
4466static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4467                                           struct bnxt_rx_ring_info *rxr)
4468{
4469        struct bnxt_ring_struct *ring;
4470        u32 type;
4471
4472        ring = &rxr->rx_agg_ring_struct;
4473        ring->fw_ring_id = INVALID_HW_RING_ID;
4474        if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4475                type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4476                        RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4477
4478                bnxt_init_rxbd_pages(ring, type);
4479        }
4480}
4481
4482static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4483{
4484        struct bnxt_rx_ring_info *rxr;
4485
4486        rxr = &bp->rx_ring[ring_nr];
4487        bnxt_init_one_rx_ring_rxbd(bp, rxr);
4488
4489        netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4490                             &rxr->bnapi->napi);
4491
4492        if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4493                bpf_prog_add(bp->xdp_prog, 1);
4494                rxr->xdp_prog = bp->xdp_prog;
4495        }
4496
4497        bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4498
4499        return bnxt_alloc_one_rx_ring(bp, ring_nr);
4500}
4501
4502static void bnxt_init_cp_rings(struct bnxt *bp)
4503{
4504        int i, j;
4505
4506        for (i = 0; i < bp->cp_nr_rings; i++) {
4507                struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4508                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4509
4510                ring->fw_ring_id = INVALID_HW_RING_ID;
4511                cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4512                cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4513                if (!cpr->cp_ring_arr)
4514                        continue;
4515                for (j = 0; j < cpr->cp_ring_count; j++) {
4516                        struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4517
4518                        ring = &cpr2->cp_ring_struct;
4519                        ring->fw_ring_id = INVALID_HW_RING_ID;
4520                        cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4521                        cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4522                }
4523        }
4524}
4525
4526static int bnxt_init_rx_rings(struct bnxt *bp)
4527{
4528        int i, rc = 0;
4529
4530        if (BNXT_RX_PAGE_MODE(bp)) {
4531                bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4532                bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4533        } else {
4534                bp->rx_offset = BNXT_RX_OFFSET;
4535                bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4536        }
4537
4538        for (i = 0; i < bp->rx_nr_rings; i++) {
4539                rc = bnxt_init_one_rx_ring(bp, i);
4540                if (rc)
4541                        break;
4542        }
4543
4544        return rc;
4545}
4546
4547static int bnxt_init_tx_rings(struct bnxt *bp)
4548{
4549        u16 i;
4550
4551        bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4552                                   BNXT_MIN_TX_DESC_CNT);
4553
4554        for (i = 0; i < bp->tx_nr_rings; i++) {
4555                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4556                struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4557
4558                ring->fw_ring_id = INVALID_HW_RING_ID;
4559
4560                if (i >= bp->tx_nr_rings_xdp)
4561                        netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4562                                             NETDEV_QUEUE_TYPE_TX,
4563                                             &txr->bnapi->napi);
4564        }
4565
4566        return 0;
4567}
4568
4569static void bnxt_free_ring_grps(struct bnxt *bp)
4570{
4571        kfree(bp->grp_info);
4572        bp->grp_info = NULL;
4573}
4574
4575static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4576{
4577        int i;
4578
4579        if (irq_re_init) {
4580                bp->grp_info = kcalloc(bp->cp_nr_rings,
4581                                       sizeof(struct bnxt_ring_grp_info),
4582                                       GFP_KERNEL);
4583                if (!bp->grp_info)
4584                        return -ENOMEM;
4585        }
4586        for (i = 0; i < bp->cp_nr_rings; i++) {
4587                if (irq_re_init)
4588                        bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4589                bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4590                bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4591                bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4592                bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4593        }
4594        return 0;
4595}
4596
4597static void bnxt_free_vnics(struct bnxt *bp)
4598{
4599        kfree(bp->vnic_info);
4600        bp->vnic_info = NULL;
4601        bp->nr_vnics = 0;
4602}
4603
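    /* One default VNIC is always needed.  RFS requires either one extra
     * ntuple VNIC (on chips that support it) or one VNIC per RX ring
     * (legacy chips), and Nitro A0 needs one more.
     */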
4604static int bnxt_alloc_vnics(struct bnxt *bp)
4605{
4606        int num_vnics = 1;
4607
4608#ifdef CONFIG_RFS_ACCEL
4609        if (bp->flags & BNXT_FLAG_RFS) {
4610                if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4611                        num_vnics++;
4612                else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4613                        num_vnics += bp->rx_nr_rings;
4614        }
4615#endif
4616
4617        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4618                num_vnics++;
4619
4620        bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4621                                GFP_KERNEL);
4622        if (!bp->vnic_info)
4623                return -ENOMEM;
4624
4625        bp->nr_vnics = num_vnics;
4626        return 0;
4627}
4628
4629static void bnxt_init_vnics(struct bnxt *bp)
4630{
4631        struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4632        int i;
4633
4634        for (i = 0; i < bp->nr_vnics; i++) {
4635                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4636                int j;
4637
4638                vnic->fw_vnic_id = INVALID_HW_RING_ID;
4639                vnic->vnic_id = i;
4640                for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4641                        vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4642
4643                vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4644
4645                if (bp->vnic_info[i].rss_hash_key) {
4646                        if (i == BNXT_VNIC_DEFAULT) {
4647                                u8 *key = (void *)vnic->rss_hash_key;
4648                                int k;
4649
4650                                if (!bp->rss_hash_key_valid &&
4651                                    !bp->rss_hash_key_updated) {
4652                                        get_random_bytes(bp->rss_hash_key,
4653                                                         HW_HASH_KEY_SIZE);
4654                                        bp->rss_hash_key_updated = true;
4655                                }
4656
4657                                memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4658                                       HW_HASH_KEY_SIZE);
4659
4660                                if (!bp->rss_hash_key_updated)
4661                                        continue;
4662
4663                                bp->rss_hash_key_updated = false;
4664                                bp->rss_hash_key_valid = true;
4665
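                                    /* Fold the first 8 bytes of the new key
                                     * into a big-endian u64 prefix for the
                                     * driver's software Toeplitz hash.
                                     */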
4666                                bp->toeplitz_prefix = 0;
4667                                for (k = 0; k < 8; k++) {
4668                                        bp->toeplitz_prefix <<= 8;
4669                                        bp->toeplitz_prefix |= key[k];
4670                                }
4671                        } else {
4672                                memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4673                                       HW_HASH_KEY_SIZE);
4674                        }
4675                }
4676        }
4677}
4678
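    /* Return the number of ring pages needed for ring_size descriptors,
     * rounded up to a power of 2 (minimum 1).  The result always leaves
     * spare room: e.g. ring_size = 1024 with 512 descriptors per page
     * yields 4 pages, not 2.
     */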
4679static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4680{
4681        int pages;
4682
4683        pages = ring_size / desc_per_pg;
4684
4685        if (!pages)
4686                return 1;
4687
4688        pages++;
4689
4690        while (pages & (pages - 1))
4691                pages++;
4692
4693        return pages;
4694}
4695
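    /* TPA (hardware aggregation) backs both LRO and hardware GRO; pick
     * the flag from the currently active netdev features, unless
     * aggregation rings are disabled altogether.
     */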
4696void bnxt_set_tpa_flags(struct bnxt *bp)
4697{
4698        bp->flags &= ~BNXT_FLAG_TPA;
4699        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4700                return;
4701        if (bp->dev->features & NETIF_F_LRO)
4702                bp->flags |= BNXT_FLAG_LRO;
4703        else if (bp->dev->features & NETIF_F_GRO_HW)
4704                bp->flags |= BNXT_FLAG_GRO;
4705}
4706
4707static void bnxt_init_ring_params(struct bnxt *bp)
4708{
4709        unsigned int rx_size;
4710
4711        bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4712        /* Try to fit 4 chunks into a 4k page */
4713        rx_size = SZ_1K -
4714                NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4715        bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4716}
4717
4718/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4719 * be set on entry.
4720 */
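    /* On exit, the RX ring geometry, the aggregation ring geometry
     * (sized at agg_factor times the RX ring when TPA, HDS or jumbo
     * buffers are needed), the TX ring geometry and a completion ring
     * large enough for the worst case (32-byte RX and TPA completions
     * count double) have all been derived from the requested sizes.
     */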
4721void bnxt_set_ring_params(struct bnxt *bp)
4722{
4723        u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4724        u32 agg_factor = 0, agg_ring_size = 0;
4725
4726        /* 8 for CRC and VLAN */
4727        rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4728
4729        rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4730                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4731
4732        ring_size = bp->rx_ring_size;
4733        bp->rx_agg_ring_size = 0;
4734        bp->rx_agg_nr_pages = 0;
4735
4736        if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4737                agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4738
4739        bp->flags &= ~BNXT_FLAG_JUMBO;
4740        if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4741                u32 jumbo_factor;
4742
4743                bp->flags |= BNXT_FLAG_JUMBO;
4744                jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4745                if (jumbo_factor > agg_factor)
4746                        agg_factor = jumbo_factor;
4747        }
4748        if (agg_factor) {
4749                if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4750                        ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4751                        netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4752                                    bp->rx_ring_size, ring_size);
4753                        bp->rx_ring_size = ring_size;
4754                }
4755                agg_ring_size = ring_size * agg_factor;
4756
4757                bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4758                                                        RX_DESC_CNT);
4759                if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4760                        u32 tmp = agg_ring_size;
4761
4762                        bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4763                        agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4764                        netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4765                                    tmp, agg_ring_size);
4766                }
4767                bp->rx_agg_ring_size = agg_ring_size;
4768                bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4769
4770                if (BNXT_RX_PAGE_MODE(bp)) {
4771                        rx_space = PAGE_SIZE;
4772                        rx_size = PAGE_SIZE -
4773                                  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4774                                  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4775                } else {
4776                        rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4777                                       bp->rx_copybreak,
4778                                       bp->dev->cfg_pending->hds_thresh);
4779                        rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4780                        rx_space = rx_size + NET_SKB_PAD +
4781                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4782                }
4783        }
4784
4785        bp->rx_buf_use_size = rx_size;
4786        bp->rx_buf_size = rx_space;
4787
4788        bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4789        bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4790
4791        ring_size = bp->tx_ring_size;
4792        bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4793        bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4794
4795        max_rx_cmpl = bp->rx_ring_size;
4796        /* MAX TPA needs to be added because TPA_START completions are
4797         * immediately recycled, so the TPA completions are not bound by
4798         * the RX ring size.
4799         */
4800        if (bp->flags & BNXT_FLAG_TPA)
4801                max_rx_cmpl += bp->max_tpa;
4802        /* RX and TPA completions are 32-byte, all others are 16-byte */
4803        ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4804        bp->cp_ring_size = ring_size;
4805
4806        bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4807        if (bp->cp_nr_pages > MAX_CP_PAGES) {
4808                bp->cp_nr_pages = MAX_CP_PAGES;
4809                bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4810                netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4811                            ring_size, bp->cp_ring_size);
4812        }
4813        bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4814        bp->cp_ring_mask = bp->cp_bit - 1;
4815}
4816
4817/* Changing allocation mode of RX rings.
4818 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4819 */
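    /* In page mode (XDP attached), RX buffers are page based and mapped
     * DMA_BIDIRECTIONAL so the program may write to them.  The MTU is
     * capped at BNXT_MAX_PAGE_MODE_MTU unless the program accepts
     * multi-buffer frames.
     */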
4820static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4821{
4822        struct net_device *dev = bp->dev;
4823
4824        if (page_mode) {
4825                bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4826                bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4827
4828                if (bp->xdp_prog->aux->xdp_has_frags)
4829                        dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4830                else
4831                        dev->max_mtu =
4832                                min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4833                if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4834                        bp->flags |= BNXT_FLAG_JUMBO;
4835                        bp->rx_skb_func = bnxt_rx_multi_page_skb;
4836                } else {
4837                        bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4838                        bp->rx_skb_func = bnxt_rx_page_skb;
4839                }
4840                bp->rx_dir = DMA_BIDIRECTIONAL;
4841        } else {
4842                dev->max_mtu = bp->max_mtu;
4843                bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4844                bp->rx_dir = DMA_FROM_DEVICE;
4845                bp->rx_skb_func = bnxt_rx_skb;
4846        }
4847}
4848
4849void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4850{
4851        __bnxt_set_rx_skb_mode(bp, page_mode);
4852
4853        if (!page_mode) {
4854                int rx, tx;
4855
4856                bnxt_get_max_rings(bp, &rx, &tx, true);
4857                if (rx > 1) {
4858                        bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4859                        bp->dev->hw_features |= NETIF_F_LRO;
4860                }
4861        }
4862
4863        /* Update LRO and GRO_HW availability */
4864        netdev_update_features(bp->dev);
4865}
4866
4867static void bnxt_free_vnic_attributes(struct bnxt *bp)
4868{
4869        int i;
4870        struct bnxt_vnic_info *vnic;
4871        struct pci_dev *pdev = bp->pdev;
4872
4873        if (!bp->vnic_info)
4874                return;
4875
4876        for (i = 0; i < bp->nr_vnics; i++) {
4877                vnic = &bp->vnic_info[i];
4878
4879                kfree(vnic->fw_grp_ids);
4880                vnic->fw_grp_ids = NULL;
4881
4882                kfree(vnic->uc_list);
4883                vnic->uc_list = NULL;
4884
4885                if (vnic->mc_list) {
4886                        dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4887                                          vnic->mc_list, vnic->mc_list_mapping);
4888                        vnic->mc_list = NULL;
4889                }
4890
4891                if (vnic->rss_table) {
4892                        dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4893                                          vnic->rss_table,
4894                                          vnic->rss_table_dma_addr);
4895                        vnic->rss_table = NULL;
4896                }
4897
4898                vnic->rss_hash_key = NULL;
4899                vnic->flags = 0;
4900        }
4901}
4902
4903static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4904{
4905        int i, rc = 0, size;
4906        struct bnxt_vnic_info *vnic;
4907        struct pci_dev *pdev = bp->pdev;
4908        int max_rings;
4909
4910        for (i = 0; i < bp->nr_vnics; i++) {
4911                vnic = &bp->vnic_info[i];
4912
4913                if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4914                        int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4915
4916                        if (mem_size > 0) {
4917                                vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4918                                if (!vnic->uc_list) {
4919                                        rc = -ENOMEM;
4920                                        goto out;
4921                                }
4922                        }
4923                }
4924
4925                if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4926                        vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4927                        vnic->mc_list =
4928                                dma_alloc_coherent(&pdev->dev,
4929                                                   vnic->mc_list_size,
4930                                                   &vnic->mc_list_mapping,
4931                                                   GFP_KERNEL);
4932                        if (!vnic->mc_list) {
4933                                rc = -ENOMEM;
4934                                goto out;
4935                        }
4936                }
4937
4938                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4939                        goto vnic_skip_grps;
4940
4941                if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4942                        max_rings = bp->rx_nr_rings;
4943                else
4944                        max_rings = 1;
4945
4946                vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4947                if (!vnic->fw_grp_ids) {
4948                        rc = -ENOMEM;
4949                        goto out;
4950                }
4951vnic_skip_grps:
4952                if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4953                    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4954                        continue;
4955
4956                /* Allocate RSS table and hash key */
4957                size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4958                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4959                        size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4960
4961                vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4962                vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4963                                                     vnic->rss_table_size,
4964                                                     &vnic->rss_table_dma_addr,
4965                                                     GFP_KERNEL);
4966                if (!vnic->rss_table) {
4967                        rc = -ENOMEM;
4968                        goto out;
4969                }
4970
4971                vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4972                vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4973        }
4974        return 0;
4975
4976out:
4977        return rc;
4978}
4979
4980static void bnxt_free_hwrm_resources(struct bnxt *bp)
4981{
4982        struct bnxt_hwrm_wait_token *token;
4983
4984        dma_pool_destroy(bp->hwrm_dma_pool);
4985        bp->hwrm_dma_pool = NULL;
4986
4987        rcu_read_lock();
4988        hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4989                WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4990        rcu_read_unlock();
4991}
4992
4993static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4994{
4995        bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4996                                            BNXT_HWRM_DMA_SIZE,
4997                                            BNXT_HWRM_DMA_ALIGN, 0);
4998        if (!bp->hwrm_dma_pool)
4999                return -ENOMEM;
5000
5001        INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5002
5003        return 0;
5004}
5005
5006static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5007{
5008        kfree(stats->hw_masks);
5009        stats->hw_masks = NULL;
5010        kfree(stats->sw_stats);
5011        stats->sw_stats = NULL;
5012        if (stats->hw_stats) {
5013                dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5014                                  stats->hw_stats_map);
5015                stats->hw_stats = NULL;
5016        }
5017}
5018
5019static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5020                                bool alloc_masks)
5021{
5022        stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5023                                             &stats->hw_stats_map, GFP_KERNEL);
5024        if (!stats->hw_stats)
5025                return -ENOMEM;
5026
5027        stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5028        if (!stats->sw_stats)
5029                goto stats_mem_err;
5030
5031        if (alloc_masks) {
5032                stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5033                if (!stats->hw_masks)
5034                        goto stats_mem_err;
5035        }
5036        return 0;
5037
5038stats_mem_err:
5039        bnxt_free_stats_mem(bp, stats);
5040        return -ENOMEM;
5041}
5042
5043static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5044{
5045        int i;
5046
5047        for (i = 0; i < count; i++)
5048                mask_arr[i] = mask;
5049}
5050
5051static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5052{
5053        int i;
5054
5055        for (i = 0; i < count; i++)
5056                mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5057}
5058
5059static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5060                                    struct bnxt_stats_mem *stats)
5061{
5062        struct hwrm_func_qstats_ext_output *resp;
5063        struct hwrm_func_qstats_ext_input *req;
5064        __le64 *hw_masks;
5065        int rc;
5066
5067        if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5068            !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5069                return -EOPNOTSUPP;
5070
5071        rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5072        if (rc)
5073                return rc;
5074
5075        req->fid = cpu_to_le16(0xffff);
5076        req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5077
5078        resp = hwrm_req_hold(bp, req);
5079        rc = hwrm_req_send(bp, req);
5080        if (!rc) {
5081                hw_masks = &resp->rx_ucast_pkts;
5082                bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5083        }
5084        hwrm_req_drop(bp, req);
5085        return rc;
5086}
5087
5088static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5089static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5090
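    /* Learn the hardware counter widths, preferably by querying the
     * firmware for its counter masks.  If the query is not supported,
     * fall back to known defaults: 48-bit ring counters on P5_PLUS
     * (64-bit on older chips) and 40-bit port counters.
     */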
5091static void bnxt_init_stats(struct bnxt *bp)
5092{
5093        struct bnxt_napi *bnapi = bp->bnapi[0];
5094        struct bnxt_cp_ring_info *cpr;
5095        struct bnxt_stats_mem *stats;
5096        __le64 *rx_stats, *tx_stats;
5097        int rc, rx_count, tx_count;
5098        u64 *rx_masks, *tx_masks;
5099        u64 mask;
5100        u8 flags;
5101
5102        cpr = &bnapi->cp_ring;
5103        stats = &cpr->stats;
5104        rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5105        if (rc) {
5106                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5107                        mask = (1ULL << 48) - 1;
5108                else
5109                        mask = -1ULL;
5110                bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5111        }
5112        if (bp->flags & BNXT_FLAG_PORT_STATS) {
5113                stats = &bp->port_stats;
5114                rx_stats = stats->hw_stats;
5115                rx_masks = stats->hw_masks;
5116                rx_count = sizeof(struct rx_port_stats) / 8;
5117                tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5118                tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5119                tx_count = sizeof(struct tx_port_stats) / 8;
5120
5121                flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5122                rc = bnxt_hwrm_port_qstats(bp, flags);
5123                if (rc) {
5124                        mask = (1ULL << 40) - 1;
5125
5126                        bnxt_fill_masks(rx_masks, mask, rx_count);
5127                        bnxt_fill_masks(tx_masks, mask, tx_count);
5128                } else {
5129                        bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5130                        bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5131                        bnxt_hwrm_port_qstats(bp, 0);
5132                }
5133        }
5134        if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5135                stats = &bp->rx_port_stats_ext;
5136                rx_stats = stats->hw_stats;
5137                rx_masks = stats->hw_masks;
5138                rx_count = sizeof(struct rx_port_stats_ext) / 8;
5139                stats = &bp->tx_port_stats_ext;
5140                tx_stats = stats->hw_stats;
5141                tx_masks = stats->hw_masks;
5142                tx_count = sizeof(struct tx_port_stats_ext) / 8;
5143
5144                flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5145                rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5146                if (rc) {
5147                        mask = (1ULL << 40) - 1;
5148
5149                        bnxt_fill_masks(rx_masks, mask, rx_count);
5150                        if (tx_stats)
5151                                bnxt_fill_masks(tx_masks, mask, tx_count);
5152                } else {
5153                        bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5154                        if (tx_stats)
5155                                bnxt_copy_hw_masks(tx_masks, tx_stats,
5156                                                   tx_count);
5157                        bnxt_hwrm_port_qstats_ext(bp, 0);
5158                }
5159        }
5160}
5161
5162static void bnxt_free_port_stats(struct bnxt *bp)
5163{
5164        bp->flags &= ~BNXT_FLAG_PORT_STATS;
5165        bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5166
5167        bnxt_free_stats_mem(bp, &bp->port_stats);
5168        bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5169        bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5170}
5171
5172static void bnxt_free_ring_stats(struct bnxt *bp)
5173{
5174        int i;
5175
5176        if (!bp->bnapi)
5177                return;
5178
5179        for (i = 0; i < bp->cp_nr_rings; i++) {
5180                struct bnxt_napi *bnapi = bp->bnapi[i];
5181                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5182
5183                bnxt_free_stats_mem(bp, &cpr->stats);
5184
5185                kfree(cpr->sw_stats);
5186                cpr->sw_stats = NULL;
5187        }
5188}
5189
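    /* Allocate one firmware stats context per completion ring.  Counter
     * masks are allocated for the first ring only (the "!i" argument
     * below).  VFs and the 58700 stop there; PFs also get port stats
     * and, when the firmware supports them, extended port stats.
     */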
5190static int bnxt_alloc_stats(struct bnxt *bp)
5191{
5192        u32 size, i;
5193        int rc;
5194
5195        size = bp->hw_ring_stats_size;
5196
5197        for (i = 0; i < bp->cp_nr_rings; i++) {
5198                struct bnxt_napi *bnapi = bp->bnapi[i];
5199                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5200
5201                cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
5202                if (!cpr->sw_stats)
5203                        return -ENOMEM;
5204
5205                cpr->stats.len = size;
5206                rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5207                if (rc)
5208                        return rc;
5209
5210                cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5211        }
5212
5213        if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5214                return 0;
5215
5216        if (bp->port_stats.hw_stats)
5217                goto alloc_ext_stats;
5218
5219        bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5220        rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5221        if (rc)
5222                return rc;
5223
5224        bp->flags |= BNXT_FLAG_PORT_STATS;
5225
5226alloc_ext_stats:
5227        /* Display extended statistics only if FW supports them */
5228        if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5229                if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5230                        return 0;
5231
5232        if (bp->rx_port_stats_ext.hw_stats)
5233                goto alloc_tx_ext_stats;
5234
5235        bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5236        rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5237        /* Extended stats are optional */
5238        if (rc)
5239                return 0;
5240
5241alloc_tx_ext_stats:
5242        if (bp->tx_port_stats_ext.hw_stats)
5243                return 0;
5244
5245        if (bp->hwrm_spec_code >= 0x10902 ||
5246            (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5247                bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5248                rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5249                /* Extended stats are optional */
5250                if (rc)
5251                        return 0;
5252        }
5253        bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5254        return 0;
5255}
5256
5257static void bnxt_clear_ring_indices(struct bnxt *bp)
5258{
5259        int i, j;
5260
5261        if (!bp->bnapi)
5262                return;
5263
5264        for (i = 0; i < bp->cp_nr_rings; i++) {
5265                struct bnxt_napi *bnapi = bp->bnapi[i];
5266                struct bnxt_cp_ring_info *cpr;
5267                struct bnxt_rx_ring_info *rxr;
5268                struct bnxt_tx_ring_info *txr;
5269
5270                if (!bnapi)
5271                        continue;
5272
5273                cpr = &bnapi->cp_ring;
5274                cpr->cp_raw_cons = 0;
5275
5276                bnxt_for_each_napi_tx(j, bnapi, txr) {
5277                        txr->tx_prod = 0;
5278                        txr->tx_cons = 0;
5279                        txr->tx_hw_cons = 0;
5280                }
5281
5282                rxr = bnapi->rx_ring;
5283                if (rxr) {
5284                        rxr->rx_prod = 0;
5285                        rxr->rx_agg_prod = 0;
5286                        rxr->rx_sw_agg_prod = 0;
5287                        rxr->rx_next_cons = 0;
5288                }
5289                bnapi->events = 0;
5290        }
5291}
5292
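    /* Track user-created filters (L2 filters that steer to a ring, and
     * ntuple filters marked no-aging) on usr_fltr_list.  The bulk free
     * routines below leave listed filters alone unless asked to free
     * everything.
     */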
5293void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5294{
5295        u8 type = fltr->type, flags = fltr->flags;
5296
5297        INIT_LIST_HEAD(&fltr->list);
5298        if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5299            (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5300                list_add_tail(&fltr->list, &bp->usr_fltr_list);
5301}
5302
5303void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5304{
5305        if (!list_empty(&fltr->list))
5306                list_del_init(&fltr->list);
5307}
5308
5309static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5310{
5311        struct bnxt_filter_base *usr_fltr, *tmp;
5312
5313        list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5314                if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5315                        continue;
5316                bnxt_del_one_usr_fltr(bp, usr_fltr);
5317        }
5318}
5319
5320static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5321{
5322        hlist_del(&fltr->hash);
5323        bnxt_del_one_usr_fltr(bp, fltr);
5324        if (fltr->flags) {
5325                clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5326                bp->ntp_fltr_count--;
5327        }
5328        kfree(fltr);
5329}
5330
5331static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5332{
5333        int i;
5334
5335        netdev_assert_locked_or_invisible(bp->dev);
5336
5337        /* We hold the netdev instance lock and all our NAPIs have been
5338         * disabled, so it's safe to delete the hash table.
5339         */
5340        for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5341                struct hlist_head *head;
5342                struct hlist_node *tmp;
5343                struct bnxt_ntuple_filter *fltr;
5344
5345                head = &bp->ntp_fltr_hash_tbl[i];
5346                hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5347                        bnxt_del_l2_filter(bp, fltr->l2_fltr);
5348                        if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5349                                     !list_empty(&fltr->base.list)))
5350                                continue;
5351                        bnxt_del_fltr(bp, &fltr->base);
5352                }
5353        }
5354        if (!all)
5355                return;
5356
5357        bitmap_free(bp->ntp_fltr_bmap);
5358        bp->ntp_fltr_bmap = NULL;
5359        bp->ntp_fltr_count = 0;
5360}
5361
5362static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5363{
5364        int i, rc = 0;
5365
5366        if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5367                return 0;
5368
5369        for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5370                INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5371
5372        bp->ntp_fltr_count = 0;
5373        bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5374
5375        if (!bp->ntp_fltr_bmap)
5376                rc = -ENOMEM;
5377
5378        return rc;
5379}
5380
5381static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5382{
5383        int i;
5384
5385        for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5386                struct hlist_head *head;
5387                struct hlist_node *tmp;
5388                struct bnxt_l2_filter *fltr;
5389
5390                head = &bp->l2_fltr_hash_tbl[i];
5391                hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5392                        if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5393                                     !list_empty(&fltr->base.list)))
5394                                continue;
5395                        bnxt_del_fltr(bp, &fltr->base);
5396                }
5397        }
5398}
5399
5400static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5401{
5402        int i;
5403
5404        for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5405                INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5406        get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5407}
5408
5409static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5410{
5411        bnxt_free_vnic_attributes(bp);
5412        bnxt_free_tx_rings(bp);
5413        bnxt_free_rx_rings(bp);
5414        bnxt_free_cp_rings(bp);
5415        bnxt_free_all_cp_arrays(bp);
5416        bnxt_free_ntp_fltrs(bp, false);
5417        bnxt_free_l2_filters(bp, false);
5418        if (irq_re_init) {
5419                bnxt_free_ring_stats(bp);
5420                if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5421                    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5422                        bnxt_free_port_stats(bp);
5423                bnxt_free_ring_grps(bp);
5424                bnxt_free_vnics(bp);
5425                kfree(bp->tx_ring_map);
5426                bp->tx_ring_map = NULL;
5427                kfree(bp->tx_ring);
5428                bp->tx_ring = NULL;
5429                kfree(bp->rx_ring);
5430                bp->rx_ring = NULL;
5431                kfree(bp->bnapi);
5432                bp->bnapi = NULL;
5433        } else {
5434                bnxt_clear_ring_indices(bp);
5435        }
5436}
5437
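    /* Central allocator for the per-device ring state.  With irq_re_init,
     * the bnxt_napi array and the RX/TX ring info arrays are rebuilt from
     * scratch along with stats, filter tables and VNICs; otherwise only
     * the descriptor rings and VNIC attributes are reallocated.  Any
     * failure unwinds through bnxt_free_mem().
     */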
5438static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5439{
5440        int i, j, rc, size, arr_size;
5441        void *bnapi;
5442
5443        if (irq_re_init) {
5444                /* Allocate the bnxt_napi pointer array and the
5445                 * bnxt_napi structs for all rings in one block
5446                 */
5447                arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5448                                bp->cp_nr_rings);
5449                size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5450                bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5451                if (!bnapi)
5452                        return -ENOMEM;
5453
5454                bp->bnapi = bnapi;
5455                bnapi += arr_size;
5456                for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5457                        bp->bnapi[i] = bnapi;
5458                        bp->bnapi[i]->index = i;
5459                        bp->bnapi[i]->bp = bp;
5460                        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5461                                struct bnxt_cp_ring_info *cpr =
5462                                        &bp->bnapi[i]->cp_ring;
5463
5464                                cpr->cp_ring_struct.ring_mem.flags =
5465                                        BNXT_RMEM_RING_PTE_FLAG;
5466                        }
5467                }
5468
5469                bp->rx_ring = kcalloc(bp->rx_nr_rings,
5470                                      sizeof(struct bnxt_rx_ring_info),
5471                                      GFP_KERNEL);
5472                if (!bp->rx_ring)
5473                        return -ENOMEM;
5474
5475                for (i = 0; i < bp->rx_nr_rings; i++) {
5476                        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5477
5478                        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5479                                rxr->rx_ring_struct.ring_mem.flags =
5480                                        BNXT_RMEM_RING_PTE_FLAG;
5481                                rxr->rx_agg_ring_struct.ring_mem.flags =
5482                                        BNXT_RMEM_RING_PTE_FLAG;
5483                        } else {
5484                                rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5485                        }
5486                        rxr->bnapi = bp->bnapi[i];
5487                        bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5488                }
5489
5490                bp->tx_ring = kcalloc(bp->tx_nr_rings,
5491                                      sizeof(struct bnxt_tx_ring_info),
5492                                      GFP_KERNEL);
5493                if (!bp->tx_ring)
5494                        return -ENOMEM;
5495
5496                bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5497                                          GFP_KERNEL);
5498
5499                if (!bp->tx_ring_map)
5500                        return -ENOMEM;
5501
5502                if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5503                        j = 0;
5504                else
5505                        j = bp->rx_nr_rings;
5506
5507                for (i = 0; i < bp->tx_nr_rings; i++) {
5508                        struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5509                        struct bnxt_napi *bnapi2;
5510
5511                        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5512                                txr->tx_ring_struct.ring_mem.flags =
5513                                        BNXT_RMEM_RING_PTE_FLAG;
5514                        bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5515                        if (i >= bp->tx_nr_rings_xdp) {
5516                                int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5517
5518                                bnapi2 = bp->bnapi[k];
5519                                txr->txq_index = i - bp->tx_nr_rings_xdp;
5520                                txr->tx_napi_idx =
5521                                        BNXT_RING_TO_TC(bp, txr->txq_index);
5522                                bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5523                                bnapi2->tx_int = bnxt_tx_int;
5524                        } else {
5525                                bnapi2 = bp->bnapi[j];
5526                                bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5527                                bnapi2->tx_ring[0] = txr;
5528                                bnapi2->tx_int = bnxt_tx_int_xdp;
5529                                j++;
5530                        }
5531                        txr->bnapi = bnapi2;
5532                        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5533                                txr->tx_cpr = &bnapi2->cp_ring;
5534                }
5535
5536                rc = bnxt_alloc_stats(bp);
5537                if (rc)
5538                        goto alloc_mem_err;
5539                bnxt_init_stats(bp);
5540
5541                rc = bnxt_alloc_ntp_fltrs(bp);
5542                if (rc)
5543                        goto alloc_mem_err;
5544
5545                rc = bnxt_alloc_vnics(bp);
5546                if (rc)
5547                        goto alloc_mem_err;
5548        }
5549
5550        rc = bnxt_alloc_all_cp_arrays(bp);
5551        if (rc)
5552                goto alloc_mem_err;
5553
5554        bnxt_init_ring_struct(bp);
5555
5556        rc = bnxt_alloc_rx_rings(bp);
5557        if (rc)
5558                goto alloc_mem_err;
5559
5560        rc = bnxt_alloc_tx_rings(bp);
5561        if (rc)
5562                goto alloc_mem_err;
5563
5564        rc = bnxt_alloc_cp_rings(bp);
5565        if (rc)
5566                goto alloc_mem_err;
5567
5568        bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5569                                                  BNXT_VNIC_MCAST_FLAG |
5570                                                  BNXT_VNIC_UCAST_FLAG;
5571        if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5572                bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5573                        BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5574
5575        rc = bnxt_alloc_vnic_attributes(bp);
5576        if (rc)
5577                goto alloc_mem_err;
5578        return 0;
5579
5580alloc_mem_err:
5581        bnxt_free_mem(bp, true);
5582        return rc;
5583}
5584
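    /* Poke each NQ/completion ring doorbell with the current consumer
     * index without re-arming it, so the NIC stops raising notifications.
     * Rings not yet allocated in firmware are skipped.
     */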
5585static void bnxt_disable_int(struct bnxt *bp)
5586{
5587        int i;
5588
5589        if (!bp->bnapi)
5590                return;
5591
5592        for (i = 0; i < bp->cp_nr_rings; i++) {
5593                struct bnxt_napi *bnapi = bp->bnapi[i];
5594                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5595                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5596
5597                if (ring->fw_ring_id != INVALID_HW_RING_ID)
5598                        bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5599        }
5600}
5601
5602static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5603{
5604        struct bnxt_napi *bnapi = bp->bnapi[n];
5605        struct bnxt_cp_ring_info *cpr;
5606
5607        cpr = &bnapi->cp_ring;
5608        return cpr->cp_ring_struct.map_idx;
5609}
5610
5611static void bnxt_disable_int_sync(struct bnxt *bp)
5612{
5613        int i;
5614
5615        if (!bp->irq_tbl)
5616                return;
5617
5618        atomic_inc(&bp->intr_sem);
5619
5620        bnxt_disable_int(bp);
5621        for (i = 0; i < bp->cp_nr_rings; i++) {
5622                int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5623
5624                synchronize_irq(bp->irq_tbl[map_idx].vector);
5625        }
5626}
5627
5628static void bnxt_enable_int(struct bnxt *bp)
5629{
5630        int i;
5631
5632        atomic_set(&bp->intr_sem, 0);
5633        for (i = 0; i < bp->cp_nr_rings; i++) {
5634                struct bnxt_napi *bnapi = bp->bnapi[i];
5635                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5636
5637                bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5638        }
5639}
5640
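    /* Register the driver with firmware: advertise the OS type and driver
     * version, opt in to optional features (hot reset, error recovery,
     * NPAR 1.2) based on fw_cap, tell firmware which VF HWRM commands to
     * forward to the PF, and subscribe to the async event IDs in
     * bnxt_async_events_arr plus any caller-supplied bitmap.
     */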
5641int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5642                            bool async_only)
5643{
5644        DECLARE_BITMAP(async_events_bmap, 256);
5645        u32 *events = (u32 *)async_events_bmap;
5646        struct hwrm_func_drv_rgtr_output *resp;
5647        struct hwrm_func_drv_rgtr_input *req;
5648        u32 flags;
5649        int rc, i;
5650
5651        rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5652        if (rc)
5653                return rc;
5654
5655        req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5656                                   FUNC_DRV_RGTR_REQ_ENABLES_VER |
5657                                   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5658
5659        req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5660        flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5661        if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5662                flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5663        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5664                flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5665                         FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5666        if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5667                flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5668        req->flags = cpu_to_le32(flags);
5669        req->ver_maj_8b = DRV_VER_MAJ;
5670        req->ver_min_8b = DRV_VER_MIN;
5671        req->ver_upd_8b = DRV_VER_UPD;
5672        req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5673        req->ver_min = cpu_to_le16(DRV_VER_MIN);
5674        req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5675
5676        if (BNXT_PF(bp)) {
5677                u32 data[8];
5678                int i;
5679
5680                memset(data, 0, sizeof(data));
5681                for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5682                        u16 cmd = bnxt_vf_req_snif[i];
5683                        unsigned int bit, idx;
5684
5685                        idx = cmd / 32;
5686                        bit = cmd % 32;
5687                        data[idx] |= 1 << bit;
5688                }
5689
5690                for (i = 0; i < 8; i++)
5691                        req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5692
5693                req->enables |=
5694                        cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5695        }
5696
5697        if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5698                req->flags |= cpu_to_le32(
5699                        FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5700
5701        memset(async_events_bmap, 0, sizeof(async_events_bmap));
5702        for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5703                u16 event_id = bnxt_async_events_arr[i];
5704
5705                if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5706                    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5707                        continue;
5708                if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5709                    !bp->ptp_cfg)
5710                        continue;
5711                __set_bit(event_id, async_events_bmap);
5712        }
5713        if (bmap && bmap_size) {
5714                for (i = 0; i < bmap_size; i++) {
5715                        if (test_bit(i, bmap))
5716                                __set_bit(i, async_events_bmap);
5717                }
5718        }
5719        for (i = 0; i < 8; i++)
5720                req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5721
5722        if (async_only)
5723                req->enables =
5724                        cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5725
5726        resp = hwrm_req_hold(bp, req);
5727        rc = hwrm_req_send(bp, req);
5728        if (!rc) {
5729                set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5730                if (resp->flags &
5731                    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5732                        bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5733        }
5734        hwrm_req_drop(bp, req);
5735        return rc;
5736}
5737
5738int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5739{
5740        struct hwrm_func_drv_unrgtr_input *req;
5741        int rc;
5742
5743        if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5744                return 0;
5745
5746        rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5747        if (rc)
5748                return rc;
5749        return hwrm_req_send(bp, req);
5750}
5751
5752static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5753
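    /* Free a UDP tunnel destination port previously allocated in firmware
     * and invalidate the cached port and FW ID.  TPA is reconfigured
     * afterwards because the tunnel TPA enable bitmap depends on the
     * currently active tunnel ports.
     */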
5754static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5755{
5756        struct hwrm_tunnel_dst_port_free_input *req;
5757        int rc;
5758
5759        if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5760            bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5761                return 0;
5762        if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5763            bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5764                return 0;
5765
5766        rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5767        if (rc)
5768                return rc;
5769
5770        req->tunnel_type = tunnel_type;
5771
5772        switch (tunnel_type) {
5773        case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5774                req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5775                bp->vxlan_port = 0;
5776                bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5777                break;
5778        case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5779                req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5780                bp->nge_port = 0;
5781                bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5782                break;
5783        case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5784                req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5785                bp->vxlan_gpe_port = 0;
5786                bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5787                break;
5788        default:
5789                break;
5790        }
5791
5792        rc = hwrm_req_send(bp, req);
5793        if (rc)
5794                netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5795                           rc);
5796        if (bp->flags & BNXT_FLAG_TPA)
5797                bnxt_set_tpa(bp, true);
5798        return rc;
5799}
5800
5801static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5802                                           u8 tunnel_type)
5803{
5804        struct hwrm_tunnel_dst_port_alloc_output *resp;
5805        struct hwrm_tunnel_dst_port_alloc_input *req;
5806        int rc;
5807
5808        rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5809        if (rc)
5810                return rc;
5811
5812        req->tunnel_type = tunnel_type;
5813        req->tunnel_dst_port_val = port;
5814
5815        resp = hwrm_req_hold(bp, req);
5816        rc = hwrm_req_send(bp, req);
5817        if (rc) {
5818                netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5819                           rc);
5820                goto err_out;
5821        }
5822
5823        switch (tunnel_type) {
5824        case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5825                bp->vxlan_port = port;
5826                bp->vxlan_fw_dst_port_id =
5827                        le16_to_cpu(resp->tunnel_dst_port_id);
5828                break;
5829        case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5830                bp->nge_port = port;
5831                bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5832                break;
5833        case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5834                bp->vxlan_gpe_port = port;
5835                bp->vxlan_gpe_fw_dst_port_id =
5836                        le16_to_cpu(resp->tunnel_dst_port_id);
5837                break;
5838        default:
5839                break;
5840        }
5841        if (bp->flags & BNXT_FLAG_TPA)
5842                bnxt_set_tpa(bp, true);
5843
5844err_out:
5845        hwrm_req_drop(bp, req);
5846        return rc;
5847}
5848
5849static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5850{
5851        struct hwrm_cfa_l2_set_rx_mask_input *req;
5852        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5853        int rc;
5854
5855        rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5856        if (rc)
5857                return rc;
5858
5859        req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5860        if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5861                req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5862                req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5863        }
5864        req->mask = cpu_to_le32(vnic->rx_mask);
5865        return hwrm_req_send_silent(bp, req);
5866}
5867
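    /* Drop a reference to an L2 filter.  The last reference unhashes the
     * filter under ntp_fltr_lock, releases its bitmap slot if one was
     * reserved, and frees the memory after an RCU grace period.
     */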
5868void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5869{
5870        if (!atomic_dec_and_test(&fltr->refcnt))
5871                return;
5872        spin_lock_bh(&bp->ntp_fltr_lock);
5873        if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5874                spin_unlock_bh(&bp->ntp_fltr_lock);
5875                return;
5876        }
5877        hlist_del_rcu(&fltr->base.hash);
5878        bnxt_del_one_usr_fltr(bp, &fltr->base);
5879        if (fltr->base.flags) {
5880                clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5881                bp->ntp_fltr_count--;
5882        }
5883        spin_unlock_bh(&bp->ntp_fltr_lock);
5884        kfree_rcu(fltr, base.rcu);
5885}
5886
5887static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5888                                                      struct bnxt_l2_key *key,
5889                                                      u32 idx)
5890{
5891        struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5892        struct bnxt_l2_filter *fltr;
5893
5894        hlist_for_each_entry_rcu(fltr, head, base.hash) {
5895                struct bnxt_l2_key *l2_key = &fltr->l2_key;
5896
5897                if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5898                    l2_key->vlan == key->vlan)
5899                        return fltr;
5900        }
5901        return NULL;
5902}
5903
5904static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5905                                                    struct bnxt_l2_key *key,
5906                                                    u32 idx)
5907{
5908        struct bnxt_l2_filter *fltr = NULL;
5909
5910        rcu_read_lock();
5911        fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5912        if (fltr)
5913                atomic_inc(&fltr->refcnt);
5914        rcu_read_unlock();
5915        return fltr;
5916}
5917
5918#define BNXT_IPV4_4TUPLE(bp, fkeys)                                     \
5919        (((fkeys)->basic.ip_proto == IPPROTO_TCP &&                     \
5920          (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||  \
5921         ((fkeys)->basic.ip_proto == IPPROTO_UDP &&                     \
5922          (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5923
5924#define BNXT_IPV6_4TUPLE(bp, fkeys)                                     \
5925        (((fkeys)->basic.ip_proto == IPPROTO_TCP &&                     \
5926          (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||  \
5927         ((fkeys)->basic.ip_proto == IPPROTO_UDP &&                     \
5928          (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5929
5930static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5931{
5932        if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5933                if (BNXT_IPV4_4TUPLE(bp, fkeys))
5934                        return sizeof(fkeys->addrs.v4addrs) +
5935                               sizeof(fkeys->ports);
5936
5937                if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5938                        return sizeof(fkeys->addrs.v4addrs);
5939        }
5940
5941        if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5942                if (BNXT_IPV6_4TUPLE(bp, fkeys))
5943                        return sizeof(fkeys->addrs.v6addrs) +
5944                               sizeof(fkeys->ports);
5945
5946                if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5947                        return sizeof(fkeys->addrs.v6addrs);
5948        }
5949
5950        return 0;
5951}
5952
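    /* Compute the Toeplitz RSS hash of a flow in software, bit-serially,
     * with the same hash key the hardware uses.  The 64-bit sliding key
     * window (prefix) is seeded with the first 8 key bytes, pre-computed
     * in bp->toeplitz_prefix, and refilled one key byte at a time starting
     * at key[8].  Only the upper 32 bits of the result are valid; they are
     * masked down to an ntuple filter table index.
     */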
5953static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5954                         const unsigned char *key)
5955{
5956        u64 prefix = bp->toeplitz_prefix, hash = 0;
5957        struct bnxt_ipv4_tuple tuple4;
5958        struct bnxt_ipv6_tuple tuple6;
5959        int i, j, len = 0;
5960        u8 *four_tuple;
5961
5962        len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5963        if (!len)
5964                return 0;
5965
5966        if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5967                tuple4.v4addrs = fkeys->addrs.v4addrs;
5968                tuple4.ports = fkeys->ports;
5969                four_tuple = (unsigned char *)&tuple4;
5970        } else {
5971                tuple6.v6addrs = fkeys->addrs.v6addrs;
5972                tuple6.ports = fkeys->ports;
5973                four_tuple = (unsigned char *)&tuple6;
5974        }
5975
5976        for (i = 0, j = 8; i < len; i++, j++) {
5977                u8 byte = four_tuple[i];
5978                int bit;
5979
5980                for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5981                        if (byte & 0x80)
5982                                hash ^= prefix;
5983                }
5984                prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5985        }
5986
5987        /* The valid part of the hash is in the upper 32 bits. */
5988        return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5989}
5990
5991#ifdef CONFIG_RFS_ACCEL
5992static struct bnxt_l2_filter *
5993bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5994{
5995        struct bnxt_l2_filter *fltr;
5996        u32 idx;
5997
5998        idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5999              BNXT_L2_FLTR_HASH_MASK;
6000        fltr = bnxt_lookup_l2_filter(bp, key, idx);
6001        return fltr;
6002}
6003#endif
6004
6005static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6006                               struct bnxt_l2_key *key, u32 idx)
6007{
6008        struct hlist_head *head;
6009
6010        ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6011        fltr->l2_key.vlan = key->vlan;
6012        fltr->base.type = BNXT_FLTR_TYPE_L2;
6013        if (fltr->base.flags) {
6014                int bit_id;
6015
6016                bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6017                                                 bp->max_fltr, 0);
6018                if (bit_id < 0)
6019                        return -ENOMEM;
6020                fltr->base.sw_id = (u16)bit_id;
6021                bp->ntp_fltr_count++;
6022        }
6023        head = &bp->l2_fltr_hash_tbl[idx];
6024        hlist_add_head_rcu(&fltr->base.hash, head);
6025        bnxt_insert_usr_fltr(bp, &fltr->base);
6026        set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6027        atomic_set(&fltr->refcnt, 1);
6028        return 0;
6029}
6030
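    /* Look up an L2 filter by {MAC, VLAN} and take a reference, or
     * allocate and insert a new one.  Returns an ERR_PTR on failure.
     */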
6031static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6032                                                   struct bnxt_l2_key *key,
6033                                                   gfp_t gfp)
6034{
6035        struct bnxt_l2_filter *fltr;
6036        u32 idx;
6037        int rc;
6038
6039        idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6040              BNXT_L2_FLTR_HASH_MASK;
6041        fltr = bnxt_lookup_l2_filter(bp, key, idx);
6042        if (fltr)
6043                return fltr;
6044
6045        fltr = kzalloc(sizeof(*fltr), gfp);
6046        if (!fltr)
6047                return ERR_PTR(-ENOMEM);
6048        spin_lock_bh(&bp->ntp_fltr_lock);
6049        rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6050        spin_unlock_bh(&bp->ntp_fltr_lock);
6051        if (rc) {
6052                bnxt_del_l2_filter(bp, fltr);
6053                fltr = ERR_PTR(rc);
6054        }
6055        return fltr;
6056}
6057
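    /* Like bnxt_alloc_l2_filter(), but the lookup and insert are done
     * atomically under ntp_fltr_lock, and an existing entry is treated
     * as an error (-EEXIST) instead of being reused.
     */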
6058struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6059                                                struct bnxt_l2_key *key,
6060                                                u16 flags)
6061{
6062        struct bnxt_l2_filter *fltr;
6063        u32 idx;
6064        int rc;
6065
6066        idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6067              BNXT_L2_FLTR_HASH_MASK;
6068        spin_lock_bh(&bp->ntp_fltr_lock);
6069        fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6070        if (fltr) {
6071                fltr = ERR_PTR(-EEXIST);
6072                goto l2_filter_exit;
6073        }
6074        fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
6075        if (!fltr) {
6076                fltr = ERR_PTR(-ENOMEM);
6077                goto l2_filter_exit;
6078        }
6079        fltr->base.flags = flags;
6080        rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6081        if (rc) {
6082                spin_unlock_bh(&bp->ntp_fltr_lock);
6083                bnxt_del_l2_filter(bp, fltr);
6084                return ERR_PTR(rc);
6085        }
6086
6087l2_filter_exit:
6088        spin_unlock_bh(&bp->ntp_fltr_lock);
6089        return fltr;
6090}
6091
6092static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6093{
6094#ifdef CONFIG_BNXT_SRIOV
6095        struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6096
6097        return vf->fw_fid;
6098#else
6099        return INVALID_HW_RING_ID;
6100#endif
6101}
6102
6103int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6104{
6105        struct hwrm_cfa_l2_filter_free_input *req;
6106        u16 target_id = 0xffff;
6107        int rc;
6108
6109        if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6110                struct bnxt_pf_info *pf = &bp->pf;
6111
6112                if (fltr->base.vf_idx >= pf->active_vfs)
6113                        return -EINVAL;
6114
6115                target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6116                if (target_id == INVALID_HW_RING_ID)
6117                        return -EINVAL;
6118        }
6119
6120        rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6121        if (rc)
6122                return rc;
6123
6124        req->target_id = cpu_to_le16(target_id);
6125        req->l2_filter_id = fltr->base.filter_id;
6126        return hwrm_req_send(bp, req);
6127}
6128
6129int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6130{
6131        struct hwrm_cfa_l2_filter_alloc_output *resp;
6132        struct hwrm_cfa_l2_filter_alloc_input *req;
6133        u16 target_id = 0xffff;
6134        int rc;
6135
6136        if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6137                struct bnxt_pf_info *pf = &bp->pf;
6138
6139                if (fltr->base.vf_idx >= pf->active_vfs)
6140                        return -EINVAL;
6141
6142                target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6143        }
6144        rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6145        if (rc)
6146                return rc;
6147
6148        req->target_id = cpu_to_le16(target_id);
6149        req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6150
6151        if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6152                req->flags |=
6153                        cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6154        req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6155        req->enables =
6156                cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6157                            CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6158                            CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6159        ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6160        eth_broadcast_addr(req->l2_addr_mask);
6161
6162        if (fltr->l2_key.vlan) {
6163                req->enables |=
6164                        cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6165                                CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6166                                CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6167                req->num_vlans = 1;
6168                req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6169                req->l2_ivlan_mask = cpu_to_le16(0xfff);
6170        }
6171
6172        resp = hwrm_req_hold(bp, req);
6173        rc = hwrm_req_send(bp, req);
6174        if (!rc) {
6175                fltr->base.filter_id = resp->l2_filter_id;
6176                set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6177        }
6178        hwrm_req_drop(bp, req);
6179        return rc;
6180}
6181
6182int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6183                                     struct bnxt_ntuple_filter *fltr)
6184{
6185        struct hwrm_cfa_ntuple_filter_free_input *req;
6186        int rc;
6187
6188        set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6189        rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6190        if (rc)
6191                return rc;
6192
6193        req->ntuple_filter_id = fltr->base.filter_id;
6194        return hwrm_req_send(bp, req);
6195}
6196
6197#define BNXT_NTP_FLTR_FLAGS                                     \
6198        (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
6199         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
6200         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
6201         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
6202         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
6203         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
6204         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
6205         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
6206         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
6207         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
6208         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
6209         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
6210         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6211
6212#define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
6213                CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6214
6215void bnxt_fill_ipv6_mask(__be32 mask[4])
6216{
6217        int i;
6218
6219        for (i = 0; i < 4; i++)
6220                mask[i] = cpu_to_be32(~0);
6221}
6222
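    /* Pick the RFS destination for an ntuple filter: the VNIC of the
     * target RSS context if one is set, the dedicated ntuple VNIC plus an
     * explicit ring table index when the chip supports it, or the RX ring
     * itself in legacy ring-index mode.
     */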
6223static void
6224bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6225                          struct hwrm_cfa_ntuple_filter_alloc_input *req,
6226                          struct bnxt_ntuple_filter *fltr)
6227{
6228        u16 rxq = fltr->base.rxq;
6229
6230        if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6231                struct ethtool_rxfh_context *ctx;
6232                struct bnxt_rss_ctx *rss_ctx;
6233                struct bnxt_vnic_info *vnic;
6234
6235                ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6236                              fltr->base.fw_vnic_id);
6237                if (ctx) {
6238                        rss_ctx = ethtool_rxfh_context_priv(ctx);
6239                        vnic = &rss_ctx->vnic;
6240
6241                        req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6242                }
6243                return;
6244        }
6245        if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6246                struct bnxt_vnic_info *vnic;
6247                u32 enables;
6248
6249                vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6250                req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6251                enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6252                req->enables |= cpu_to_le32(enables);
6253                req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6254        } else {
6255                u32 flags;
6256
6257                flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6258                req->flags |= cpu_to_le32(flags);
6259                req->dst_id = cpu_to_le16(rxq);
6260        }
6261}
6262
6263int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6264                                      struct bnxt_ntuple_filter *fltr)
6265{
6266        struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6267        struct hwrm_cfa_ntuple_filter_alloc_input *req;
6268        struct bnxt_flow_masks *masks = &fltr->fmasks;
6269        struct flow_keys *keys = &fltr->fkeys;
6270        struct bnxt_l2_filter *l2_fltr;
6271        struct bnxt_vnic_info *vnic;
6272        int rc;
6273
6274        rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6275        if (rc)
6276                return rc;
6277
6278        l2_fltr = fltr->l2_fltr;
6279        req->l2_filter_id = l2_fltr->base.filter_id;
6280
6281        if (fltr->base.flags & BNXT_ACT_DROP) {
6282                req->flags =
6283                        cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6284        } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6285                bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6286        } else {
6287                vnic = &bp->vnic_info[fltr->base.rxq + 1];
6288                req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6289        }
6290        req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6291
6292        req->ethertype = htons(ETH_P_IP);
6293        req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6294        req->ip_protocol = keys->basic.ip_proto;
6295
6296        if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6297                req->ethertype = htons(ETH_P_IPV6);
6298                req->ip_addr_type =
6299                        CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6300                *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6301                *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6302                *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6303                *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6304        } else {
6305                req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6306                req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6307                req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6308                req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6309        }
6310        if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6311                req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6312                req->tunnel_type =
6313                        CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6314        }
6315
6316        req->src_port = keys->ports.src;
6317        req->src_port_mask = masks->ports.src;
6318        req->dst_port = keys->ports.dst;
6319        req->dst_port_mask = masks->ports.dst;
6320
6321        resp = hwrm_req_hold(bp, req);
6322        rc = hwrm_req_send(bp, req);
6323        if (!rc)
6324                fltr->base.filter_id = resp->ntuple_filter_id;
6325        hwrm_req_drop(bp, req);
6326        return rc;
6327}
6328
6329static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6330                                     const u8 *mac_addr)
6331{
6332        struct bnxt_l2_filter *fltr;
6333        struct bnxt_l2_key key;
6334        int rc;
6335
6336        ether_addr_copy(key.dst_mac_addr, mac_addr);
6337        key.vlan = 0;
6338        fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6339        if (IS_ERR(fltr))
6340                return PTR_ERR(fltr);
6341
6342        fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6343        rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6344        if (rc)
6345                bnxt_del_l2_filter(bp, fltr);
6346        else
6347                bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6348        return rc;
6349}
6350
6351static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6352{
6353        u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6354
6355        /* Any associated ntuple filters will also be cleared by firmware. */
6356        for (i = 0; i < num_of_vnics; i++) {
6357                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6358
6359                for (j = 0; j < vnic->uc_filter_count; j++) {
6360                        struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6361
6362                        bnxt_hwrm_l2_filter_free(bp, fltr);
6363                        bnxt_del_l2_filter(bp, fltr);
6364                }
6365                vnic->uc_filter_count = 0;
6366        }
6367}
6368
6369#define BNXT_DFLT_TUNL_TPA_BMAP                         \
6370        (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |       \
6371         VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |      \
6372         VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6373
6374static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6375                                           struct hwrm_vnic_tpa_cfg_input *req)
6376{
6377        u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6378
6379        if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6380                return;
6381
6382        if (bp->vxlan_port)
6383                tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6384        if (bp->vxlan_gpe_port)
6385                tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6386        if (bp->nge_port)
6387                tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6388
6389        req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6390        req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6391}
6392
6393int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6394                           u32 tpa_flags)
6395{
6396        u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6397        struct hwrm_vnic_tpa_cfg_input *req;
6398        int rc;
6399
6400        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6401                return 0;
6402
6403        rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6404        if (rc)
6405                return rc;
6406
6407        if (tpa_flags) {
6408                u16 mss = bp->dev->mtu - 40;
6409                u32 nsegs, n, segs = 0, flags;
6410
6411                flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6412                        VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6413                        VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6414                        VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6415                        VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6416                if (tpa_flags & BNXT_FLAG_GRO)
6417                        flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6418
6419                req->flags = cpu_to_le32(flags);
6420
6421                req->enables =
6422                        cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6423                                    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6424                                    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6425
6426                /* The number of segs is in log2 units, and the first
6427                 * packet is not counted in these units.
6428                 */
6429                if (mss <= BNXT_RX_PAGE_SIZE) {
6430                        n = BNXT_RX_PAGE_SIZE / mss;
6431                        nsegs = (MAX_SKB_FRAGS - 1) * n;
6432                } else {
6433                        n = mss / BNXT_RX_PAGE_SIZE;
6434                        if (mss & (BNXT_RX_PAGE_SIZE - 1))
6435                                n++;
6436                        nsegs = (MAX_SKB_FRAGS - n) / n;
6437                }
6438
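                /* Worked example: a 1500-byte MTU gives mss = 1460; with
                 * 4K pages (BNXT_RX_PAGE_SIZE = 4096) and the default
                 * MAX_SKB_FRAGS of 17, n = 4096 / 1460 = 2 and
                 * nsegs = (17 - 1) * 2 = 32, so pre-P5 chips are
                 * programmed with ilog2(32) = 5 below.
                 */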
6439                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6440                        segs = MAX_TPA_SEGS_P5;
6441                        max_aggs = bp->max_tpa;
6442                } else {
6443                        segs = ilog2(nsegs);
6444                }
6445                req->max_agg_segs = cpu_to_le16(segs);
6446                req->max_aggs = cpu_to_le16(max_aggs);
6447
6448                req->min_agg_len = cpu_to_le32(512);
6449                bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6450        }
6451        req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6452
6453        return hwrm_req_send(bp, req);
6454}
6455
6456static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6457{
6458        struct bnxt_ring_grp_info *grp_info;
6459
6460        grp_info = &bp->grp_info[ring->grp_idx];
6461        return grp_info->cp_fw_ring_id;
6462}
6463
6464static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6465{
6466        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6467                return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6468        else
6469                return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6470}
6471
6472static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6473{
6474        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6475                return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6476        else
6477                return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6478}
6479
6480static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6481{
6482        int entries;
6483
6484        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6485                entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6486        else
6487                entries = HW_HASH_INDEX_SIZE;
6488
6489        bp->rss_indir_tbl_entries = entries;
6490        bp->rss_indir_tbl =
6491                kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6492        if (!bp->rss_indir_tbl)
6493                return -ENOMEM;
6494
6495        return 0;
6496}
6497
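    /* Initialize an RSS indirection table (the device default table or a
     * user RSS context) to the standard round-robin spread across the RX
     * rings; Nitro A0 excludes the last ring.  Unused tail entries are
     * zeroed.
     */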
6498void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6499                                 struct ethtool_rxfh_context *rss_ctx)
6500{
6501        u16 max_rings, max_entries, pad, i;
6502        u32 *rss_indir_tbl;
6503
6504        if (!bp->rx_nr_rings)
6505                return;
6506
6507        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6508                max_rings = bp->rx_nr_rings - 1;
6509        else
6510                max_rings = bp->rx_nr_rings;
6511
6512        max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6513        if (rss_ctx)
6514                rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6515        else
6516                rss_indir_tbl = &bp->rss_indir_tbl[0];
6517
6518        for (i = 0; i < max_entries; i++)
6519                rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6520
6521        pad = bp->rss_indir_tbl_entries - max_entries;
6522        if (pad)
6523                memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6524}
6525
6526static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6527{
6528        u32 i, tbl_size, max_ring = 0;
6529
6530        if (!bp->rss_indir_tbl)
6531                return 0;
6532
6533        tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6534        for (i = 0; i < tbl_size; i++)
6535                max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6536        return max_ring;
6537}
6538
6539int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6540{
6541        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6542                if (!rx_rings)
6543                        return 0;
6544                return bnxt_calc_nr_ring_pages(rx_rings - 1,
6545                                               BNXT_RSS_TABLE_ENTRIES_P5);
6546        }
6547        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6548                return 2;
6549        return 1;
6550}
6551
6552static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6553{
6554        bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6555        u16 i, j;
6556
6557        /* Fill the RSS indirection table with ring group ids */
6558        for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6559                if (!no_rss)
6560                        j = bp->rss_indir_tbl[i];
6561                vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6562        }
6563}
6564
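    /* On P5+ chips every indirection table entry is a pair of FW ring
     * IDs: the RX ring and its companion completion ring, hence the two
     * writes per iteration below.
     */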
6565static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6566                                    struct bnxt_vnic_info *vnic)
6567{
6568        __le16 *ring_tbl = vnic->rss_table;
6569        struct bnxt_rx_ring_info *rxr;
6570        u16 tbl_size, i;
6571
6572        tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6573
6574        for (i = 0; i < tbl_size; i++) {
6575                u16 ring_id, j;
6576
6577                if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6578                        j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6579                else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6580                        j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6581                else
6582                        j = bp->rss_indir_tbl[i];
6583                rxr = &bp->rx_ring[j];
6584
6585                ring_id = rxr->rx_ring_struct.fw_ring_id;
6586                *ring_tbl++ = cpu_to_le16(ring_id);
6587                ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6588                *ring_tbl++ = cpu_to_le16(ring_id);
6589        }
6590}
6591
6592static void
6593__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6594                         struct bnxt_vnic_info *vnic)
6595{
6596        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6597                bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6598                if (bp->flags & BNXT_FLAG_CHIP_P7)
6599                        req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6600        } else {
6601                bnxt_fill_hw_rss_tbl(bp, vnic);
6602        }
6603
6604        if (bp->rss_hash_delta) {
6605                req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6606                if (bp->rss_hash_cfg & bp->rss_hash_delta)
6607                        req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6608                else
6609                        req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6610        } else {
6611                req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6612        }
6613        req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6614        req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6615        req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6616}
6617
6618static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6619                                  bool set_rss)
6620{
6621        struct hwrm_vnic_rss_cfg_input *req;
6622        int rc;
6623
6624        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6625            vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6626                return 0;
6627
6628        rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6629        if (rc)
6630                return rc;
6631
6632        if (set_rss)
6633                __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6634        req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6635        return hwrm_req_send(bp, req);
6636}
6637
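    /* P5+ RSS configuration: one HWRM message per RSS context, each
     * covering BNXT_RSS_TABLE_ENTRIES_P5 table entries, with the table
     * DMA address advanced by BNXT_RSS_TABLE_SIZE_P5 per context.
     */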
6638static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6639                                     struct bnxt_vnic_info *vnic, bool set_rss)
6640{
6641        struct hwrm_vnic_rss_cfg_input *req;
6642        dma_addr_t ring_tbl_map;
6643        u32 i, nr_ctxs;
6644        int rc;
6645
6646        rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6647        if (rc)
6648                return rc;
6649
6650        req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6651        if (!set_rss)
6652                return hwrm_req_send(bp, req);
6653
6654        __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6655        ring_tbl_map = vnic->rss_table_dma_addr;
6656        nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6657
6658        hwrm_req_hold(bp, req);
6659        for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6660                req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6661                req->ring_table_pair_index = i;
6662                req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6663                rc = hwrm_req_send(bp, req);
6664                if (rc)
6665                        goto exit;
6666        }
6667
6668exit:
6669        hwrm_req_drop(bp, req);
6670        return rc;
6671}
6672
6673static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6674{
6675        struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6676        struct hwrm_vnic_rss_qcfg_output *resp;
6677        struct hwrm_vnic_rss_qcfg_input *req;
6678
6679        if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6680                return;
6681
6682        req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6683        /* all contexts are configured to the same hash_type; ctx 0 always exists */
6684        req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6685        resp = hwrm_req_hold(bp, req);
6686        if (!hwrm_req_send(bp, req)) {
6687                bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6688                bp->rss_hash_delta = 0;
6689        }
6690        hwrm_req_drop(bp, req);
6691}
6692
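    /* Configure RX buffer placement for the VNIC: the jumbo placement
     * threshold and, when aggregation rings are used without page mode,
     * header-data split with the user-configured HDS threshold.
     */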
6693static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6694{
6695        u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6696        struct hwrm_vnic_plcmodes_cfg_input *req;
6697        int rc;
6698
6699        rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6700        if (rc)
6701                return rc;
6702
6703        req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6704        req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6705        req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6706
6707        if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6708                req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6709                                          VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6710                req->enables |=
6711                        cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6712                req->hds_threshold = cpu_to_le16(hds_thresh);
6713        }
6714        req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6715        return hwrm_req_send(bp, req);
6716}
6717
6718static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6719                                        struct bnxt_vnic_info *vnic,
6720                                        u16 ctx_idx)
6721{
6722        struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6723
6724        if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6725                return;
6726
6727        req->rss_cos_lb_ctx_id =
6728                cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6729
6730        hwrm_req_send(bp, req);
6731        vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6732}
6733
6734static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6735{
6736        int i, j;
6737
6738        for (i = 0; i < bp->nr_vnics; i++) {
6739                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6740
6741                for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6742                        if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6743                                bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6744                }
6745        }
6746        bp->rsscos_nr_ctxs = 0;
6747}
6748
6749static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6750                                    struct bnxt_vnic_info *vnic, u16 ctx_idx)
6751{
6752        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6753        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6754        int rc;
6755
6756        rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6757        if (rc)
6758                return rc;
6759
6760        resp = hwrm_req_hold(bp, req);
6761        rc = hwrm_req_send(bp, req);
6762        if (!rc)
6763                vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6764                        le16_to_cpu(resp->rss_cos_lb_ctx_id);
6765        hwrm_req_drop(bp, req);
6766
6767        return rc;
6768}
6769
6770static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6771{
6772        if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6773                return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6774        return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6775}
6776
6777int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6778{
6779        struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6780        struct hwrm_vnic_cfg_input *req;
6781        unsigned int ring = 0, grp_idx;
6782        u16 def_vlan = 0;
6783        int rc;
6784
6785        rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6786        if (rc)
6787                return rc;
6788
6789        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6790                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6791
6792                req->default_rx_ring_id =
6793                        cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6794                req->default_cmpl_ring_id =
6795                        cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6796                req->enables =
6797                        cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6798                                    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6799                goto vnic_mru;
6800        }
6801        req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6802        /* Only RSS is supported for now; TBD: COS & LB */
6803        if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6804                req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6805                req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6806                                           VNIC_CFG_REQ_ENABLES_MRU);
6807        } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6808                req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6809                req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6810                                           VNIC_CFG_REQ_ENABLES_MRU);
6811                req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6812        } else {
6813                req->rss_rule = cpu_to_le16(0xffff);
6814        }
6815
6816        if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6817            (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6818                req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6819                req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6820        } else {
6821                req->cos_rule = cpu_to_le16(0xffff);
6822        }
6823
6824        if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6825                ring = 0;
6826        else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6827                ring = vnic->vnic_id - 1;
6828        else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6829                ring = bp->rx_nr_rings - 1;
6830
6831        grp_idx = bp->rx_ring[ring].bnapi->index;
6832        req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6833        req->lb_rule = cpu_to_le16(0xffff);
6834vnic_mru:
6835        vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
6836        req->mru = cpu_to_le16(vnic->mru);
6837
6838        req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6839#ifdef CONFIG_BNXT_SRIOV
6840        if (BNXT_VF(bp))
6841                def_vlan = bp->vf.vlan;
6842#endif
6843        if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6844                req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6845        if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6846                req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6847
6848        return hwrm_req_send(bp, req);
6849}
6850
6851static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6852                                    struct bnxt_vnic_info *vnic)
6853{
6854        if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6855                struct hwrm_vnic_free_input *req;
6856
6857                if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6858                        return;
6859
6860                req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6861
6862                hwrm_req_send(bp, req);
6863                vnic->fw_vnic_id = INVALID_HW_RING_ID;
6864        }
6865}
6866
6867static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6868{
6869        u16 i;
6870
6871        for (i = 0; i < bp->nr_vnics; i++)
6872                bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6873}
6874
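    /* Allocate a VNIC in firmware.  On pre-P5 chips the RX rings in
     * [start_rx_ring_idx, start_rx_ring_idx + nr_rings) are mapped to the
     * VNIC through their ring groups; P5+ chips have no ring groups.
     */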
6875int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6876                         unsigned int start_rx_ring_idx,
6877                         unsigned int nr_rings)
6878{
6879        unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6880        struct hwrm_vnic_alloc_output *resp;
6881        struct hwrm_vnic_alloc_input *req;
6882        int rc;
6883
6884        rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6885        if (rc)
6886                return rc;
6887
6888        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6889                goto vnic_no_ring_grps;
6890
6891        /* map ring groups to this vnic */
6892        for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6893                grp_idx = bp->rx_ring[i].bnapi->index;
6894                if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6895                        netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6896                                   j, nr_rings);
6897                        break;
6898                }
6899                vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6900        }
6901
6902vnic_no_ring_grps:
6903        for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6904                vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6905        if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6906                req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6907
6908        resp = hwrm_req_hold(bp, req);
6909        rc = hwrm_req_send(bp, req);
6910        if (!rc)
6911                vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6912        hwrm_req_drop(bp, req);
6913        return rc;
6914}
6915
6916static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6917{
6918        struct hwrm_vnic_qcaps_output *resp;
6919        struct hwrm_vnic_qcaps_input *req;
6920        int rc;
6921
6922        bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6923        bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6924        bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6925        if (bp->hwrm_spec_code < 0x10600)
6926                return 0;
6927
6928        rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6929        if (rc)
6930                return rc;
6931
6932        resp = hwrm_req_hold(bp, req);
6933        rc = hwrm_req_send(bp, req);
6934        if (!rc) {
6935                u32 flags = le32_to_cpu(resp->flags);
6936
6937                if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6938                    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6939                        bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6940                if (flags &
6941                    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6942                        bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6943
6944                /* Older P5 fw before EXT_HW_STATS support did not set
6945                 * VLAN_STRIP_CAP properly.
6946                 */
6947                if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6948                    (BNXT_CHIP_P5(bp) &&
6949                     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6950                        bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6951                if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6952                        bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6953                if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6954                        bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6955                bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6956                if (bp->max_tpa_v2) {
6957                        if (BNXT_CHIP_P5(bp))
6958                                bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6959                        else
6960                                bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6961                }
6962                if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6963                        bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6964                if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6965                        bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6966                if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6967                        bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6968                if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6969                        bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6970                if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6971                        bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6972                if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
6973                        bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
6974        }
6975        hwrm_req_drop(bp, req);
6976        return rc;
6977}
6978
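/* Ring groups tie together an RX ring, its aggregation ring, a
 * completion ring and a stats context on pre-P5 chips.  P5_PLUS chips
 * do not use ring groups, so this is a no-op there.
 */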
6979static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6980{
6981        struct hwrm_ring_grp_alloc_output *resp;
6982        struct hwrm_ring_grp_alloc_input *req;
6983        int rc;
6984        u16 i;
6985
6986        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6987                return 0;
6988
6989        rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6990        if (rc)
6991                return rc;
6992
6993        resp = hwrm_req_hold(bp, req);
6994        for (i = 0; i < bp->rx_nr_rings; i++) {
6995                unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6996
6997                req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6998                req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6999                req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7000                req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7001
7002                rc = hwrm_req_send(bp, req);
7003
7004                if (rc)
7005                        break;
7006
7007                bp->grp_info[grp_idx].fw_grp_id =
7008                        le32_to_cpu(resp->ring_group_id);
7009        }
7010        hwrm_req_drop(bp, req);
7011        return rc;
7012}
7013
7014static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7015{
7016        struct hwrm_ring_grp_free_input *req;
7017        u16 i;
7018
7019        if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7020                return;
7021
7022        if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7023                return;
7024
7025        hwrm_req_hold(bp, req);
7026        for (i = 0; i < bp->cp_nr_rings; i++) {
7027                if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7028                        continue;
7029                req->ring_group_id =
7030                        cpu_to_le32(bp->grp_info[i].fw_grp_id);
7031
7032                hwrm_req_send(bp, req);
7033                bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7034        }
7035        hwrm_req_drop(bp, req);
7036}
7037
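/* Fill in the P5_PLUS-specific fields of an RX or AGG ring allocation
 * request: buffer size, stats context and NQ binding.  AGG rings are
 * additionally tied to their parent RX ring, and RX rings request SOP
 * padding when NET_IP_ALIGN is 2 so the IP header lands aligned.
 */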
7038static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7039                                       struct hwrm_ring_alloc_input *req,
7040                                       struct bnxt_ring_struct *ring)
7041{
7042        struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7043        u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7044                      RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7045
7046        if (ring_type == HWRM_RING_ALLOC_AGG) {
7047                req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7048                req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7049                req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
7050                enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7051        } else {
7052                req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7053                if (NET_IP_ALIGN == 2)
7054                        req->flags =
7055                                cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7056        }
7057        req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7058        req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7059        req->enables |= cpu_to_le32(enables);
7060}
7061
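/* Common HWRM_RING_ALLOC helper for all ring types (TX, RX, AGG, CMPL,
 * NQ).  It points firmware at the ring memory (a single page or a page
 * table), fills in the type-specific fields, and on success stores the
 * returned ring id in ring->fw_ring_id.  Both a transport error (rc)
 * and a firmware error code in the response are reported as -EIO.
 */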
7062static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7063                                    struct bnxt_ring_struct *ring,
7064                                    u32 ring_type, u32 map_index)
7065{
7066        struct hwrm_ring_alloc_output *resp;
7067        struct hwrm_ring_alloc_input *req;
7068        struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7069        struct bnxt_ring_grp_info *grp_info;
7070        int rc, err = 0;
7071        u16 ring_id;
7072
7073        rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7074        if (rc)
7075                goto exit;
7076
7077        req->enables = 0;
7078        if (rmem->nr_pages > 1) {
7079                req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7080                /* Page size is in log2 units */
7081                req->page_size = BNXT_PAGE_SHIFT;
7082                req->page_tbl_depth = 1;
7083        } else {
7084                req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7085        }
7086        req->fbo = 0;
7087        /* Association of ring index with doorbell index and MSIX number */
7088        req->logical_id = cpu_to_le16(map_index);
7089
7090        switch (ring_type) {
7091        case HWRM_RING_ALLOC_TX: {
7092                struct bnxt_tx_ring_info *txr;
7093                u16 flags = 0;
7094
7095                txr = container_of(ring, struct bnxt_tx_ring_info,
7096                                   tx_ring_struct);
7097                req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7098                /* Association of transmit ring with completion ring */
7099                grp_info = &bp->grp_info[ring->grp_idx];
7100                req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7101                req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7102                req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7103                req->queue_id = cpu_to_le16(ring->queue_id);
7104                if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7105                        req->cmpl_coal_cnt =
7106                                RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7107                if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7108                        flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7109                req->flags = cpu_to_le16(flags);
7110                break;
7111        }
7112        case HWRM_RING_ALLOC_RX:
7113        case HWRM_RING_ALLOC_AGG:
7114                req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7115                req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7116                              cpu_to_le32(bp->rx_ring_mask + 1) :
7117                              cpu_to_le32(bp->rx_agg_ring_mask + 1);
7118                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7119                        bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
7120                break;
7121        case HWRM_RING_ALLOC_CMPL:
7122                req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7123                req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7124                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7125                        /* Association of cp ring with nq */
7126                        grp_info = &bp->grp_info[map_index];
7127                        req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7128                        req->cq_handle = cpu_to_le64(ring->handle);
7129                        req->enables |= cpu_to_le32(
7130                                RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7131                } else {
7132                        req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7133                }
7134                break;
7135        case HWRM_RING_ALLOC_NQ:
7136                req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7137                req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7138                req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7139                break;
7140        default:
7141                netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7142                           ring_type);
7143                return -EINVAL;
7144        }
7145
7146        resp = hwrm_req_hold(bp, req);
7147        rc = hwrm_req_send(bp, req);
7148        err = le16_to_cpu(resp->error_code);
7149        ring_id = le16_to_cpu(resp->ring_id);
7150        hwrm_req_drop(bp, req);
7151
7152exit:
7153        if (rc || err) {
7154                netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7155                           ring_type, rc, err);
7156                return -EIO;
7157        }
7158        ring->fw_ring_id = ring_id;
7159        return rc;
7160}
7161
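/* Point firmware asynchronous event notifications at completion ring
 * @idx, using HWRM_FUNC_CFG on the PF and HWRM_FUNC_VF_CFG on a VF.
 */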
7162static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7163{
7164        int rc;
7165
7166        if (BNXT_PF(bp)) {
7167                struct hwrm_func_cfg_input *req;
7168
7169                rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7170                if (rc)
7171                        return rc;
7172
7173                req->fid = cpu_to_le16(0xffff);
7174                req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7175                req->async_event_cr = cpu_to_le16(idx);
7176                return hwrm_req_send(bp, req);
7177        } else {
7178                struct hwrm_func_vf_cfg_input *req;
7179
7180                rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7181                if (rc)
7182                        return rc;
7183
7184                req->enables =
7185                        cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7186                req->async_event_cr = cpu_to_le16(idx);
7187                return hwrm_req_send(bp, req);
7188        }
7189}
7190
7191static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7192                             u32 ring_type)
7193{
7194        switch (ring_type) {
7195        case HWRM_RING_ALLOC_TX:
7196                db->db_ring_mask = bp->tx_ring_mask;
7197                break;
7198        case HWRM_RING_ALLOC_RX:
7199                db->db_ring_mask = bp->rx_ring_mask;
7200                break;
7201        case HWRM_RING_ALLOC_AGG:
7202                db->db_ring_mask = bp->rx_agg_ring_mask;
7203                break;
7204        case HWRM_RING_ALLOC_CMPL:
7205        case HWRM_RING_ALLOC_NQ:
7206                db->db_ring_mask = bp->cp_ring_mask;
7207                break;
7208        }
7209        if (bp->flags & BNXT_FLAG_CHIP_P7) {
7210                db->db_epoch_mask = db->db_ring_mask + 1;
7211                db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7212        }
7213}
7214
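/* Set up the host view of a ring's doorbell.  P5_PLUS chips use 64-bit
 * doorbells at a single bp->db_offset, with the ring type and xid
 * encoded in the key (plus a valid bit on P7); older chips use 32-bit
 * doorbells spaced 0x80 bytes apart, indexed by map_idx.
 */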
7215static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7216                        u32 map_idx, u32 xid)
7217{
7218        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7219                switch (ring_type) {
7220                case HWRM_RING_ALLOC_TX:
7221                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7222                        break;
7223                case HWRM_RING_ALLOC_RX:
7224                case HWRM_RING_ALLOC_AGG:
7225                        db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7226                        break;
7227                case HWRM_RING_ALLOC_CMPL:
7228                        db->db_key64 = DBR_PATH_L2;
7229                        break;
7230                case HWRM_RING_ALLOC_NQ:
7231                        db->db_key64 = DBR_PATH_L2;
7232                        break;
7233                }
7234                db->db_key64 |= (u64)xid << DBR_XID_SFT;
7235
7236                if (bp->flags & BNXT_FLAG_CHIP_P7)
7237                        db->db_key64 |= DBR_VALID;
7238
7239                db->doorbell = bp->bar1 + bp->db_offset;
7240        } else {
7241                db->doorbell = bp->bar1 + map_idx * 0x80;
7242                switch (ring_type) {
7243                case HWRM_RING_ALLOC_TX:
7244                        db->db_key32 = DB_KEY_TX;
7245                        break;
7246                case HWRM_RING_ALLOC_RX:
7247                case HWRM_RING_ALLOC_AGG:
7248                        db->db_key32 = DB_KEY_RX;
7249                        break;
7250                case HWRM_RING_ALLOC_CMPL:
7251                        db->db_key32 = DB_KEY_CP;
7252                        break;
7253                }
7254        }
7255        bnxt_set_db_mask(bp, db, ring_type);
7256}
7257
7258static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7259                                   struct bnxt_rx_ring_info *rxr)
7260{
7261        struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7262        struct bnxt_napi *bnapi = rxr->bnapi;
7263        u32 type = HWRM_RING_ALLOC_RX;
7264        u32 map_idx = bnapi->index;
7265        int rc;
7266
7267        rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7268        if (rc)
7269                return rc;
7270
7271        bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7272        bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7273
7274        return 0;
7275}
7276
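/* Allocate the aggregation ring paired with @rxr.  Note the doorbell
 * ordering: the AGG producer is written before the RX producer, so that
 * aggregation buffers are in place before RX buffers are posted (see
 * also the comment in bnxt_hwrm_ring_alloc()).
 */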
7277static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7278                                       struct bnxt_rx_ring_info *rxr)
7279{
7280        struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7281        u32 type = HWRM_RING_ALLOC_AGG;
7282        u32 grp_idx = ring->grp_idx;
7283        u32 map_idx;
7284        int rc;
7285
7286        map_idx = grp_idx + bp->rx_nr_rings;
7287        rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7288        if (rc)
7289                return rc;
7290
7291        bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7292                    ring->fw_ring_id);
7293        bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7294        bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7295        bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7296
7297        return 0;
7298}
7299
7300static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7301                                      struct bnxt_cp_ring_info *cpr)
7302{
7303        const u32 type = HWRM_RING_ALLOC_CMPL;
7304        struct bnxt_napi *bnapi = cpr->bnapi;
7305        struct bnxt_ring_struct *ring;
7306        u32 map_idx = bnapi->index;
7307        int rc;
7308
7309        ring = &cpr->cp_ring_struct;
7310        ring->handle = BNXT_SET_NQ_HDL(cpr);
7311        rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7312        if (rc)
7313                return rc;
7314        bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7315        bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7316        return 0;
7317}
7318
7319static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7320                                   struct bnxt_tx_ring_info *txr, u32 tx_idx)
7321{
7322        struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7323        const u32 type = HWRM_RING_ALLOC_TX;
7324        int rc;
7325
7326        rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
7327        if (rc)
7328                return rc;
7329        bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7330        return 0;
7331}
7332
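/* Allocate all firmware rings for the device, in dependency order:
 * NQs (P5_PLUS) or completion rings first, with the IRQ masked around
 * each allocation; the first of these doubles as the async event ring.
 * Then TX rings (each with its own completion ring on P5_PLUS), RX
 * rings, and finally the aggregation rings.
 */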
7333static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7334{
7335        bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7336        int i, rc = 0;
7337        u32 type;
7338
7339        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7340                type = HWRM_RING_ALLOC_NQ;
7341        else
7342                type = HWRM_RING_ALLOC_CMPL;
7343        for (i = 0; i < bp->cp_nr_rings; i++) {
7344                struct bnxt_napi *bnapi = bp->bnapi[i];
7345                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7346                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7347                u32 map_idx = ring->map_idx;
7348                unsigned int vector;
7349
7350                vector = bp->irq_tbl[map_idx].vector;
7351                disable_irq_nosync(vector);
7352                rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7353                if (rc) {
7354                        enable_irq(vector);
7355                        goto err_out;
7356                }
7357                bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7358                bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7359                enable_irq(vector);
7360                bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7361
7362                if (!i) {
7363                        rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7364                        if (rc)
7365                                netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7366                }
7367        }
7368
7369        for (i = 0; i < bp->tx_nr_rings; i++) {
7370                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7371
7372                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7373                        rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7374                        if (rc)
7375                                goto err_out;
7376                }
7377                rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7378                if (rc)
7379                        goto err_out;
7380        }
7381
7382        for (i = 0; i < bp->rx_nr_rings; i++) {
7383                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7384
7385                rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7386                if (rc)
7387                        goto err_out;
7388                /* Agg buffers, if any, must be posted first; see bnxt_hwrm_rx_agg_ring_alloc(). */
7389                if (!agg_rings)
7390                        bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7391                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7392                        rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7393                        if (rc)
7394                                goto err_out;
7395                }
7396        }
7397
7398        if (agg_rings) {
7399                for (i = 0; i < bp->rx_nr_rings; i++) {
7400                        rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7401                        if (rc)
7402                                goto err_out;
7403                }
7404        }
7405err_out:
7406        return rc;
7407}
7408
7409static void bnxt_cancel_dim(struct bnxt *bp)
7410{
7411        int i;
7412
7413        /* DIM work is initialized in bnxt_enable_napi().  Proceed only
7414         * if NAPI is enabled.
7415         */
7416        if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7417                return;
7418
7419        /* Make sure NAPI sees that the VNIC is disabled */
7420        synchronize_net();
7421        for (i = 0; i < bp->rx_nr_rings; i++) {
7422                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7423                struct bnxt_napi *bnapi = rxr->bnapi;
7424
7425                cancel_work_sync(&bnapi->cp_ring.dim.work);
7426        }
7427}
7428
7429static int hwrm_ring_free_send_msg(struct bnxt *bp,
7430                                   struct bnxt_ring_struct *ring,
7431                                   u32 ring_type, int cmpl_ring_id)
7432{
7433        struct hwrm_ring_free_output *resp;
7434        struct hwrm_ring_free_input *req;
7435        u16 error_code = 0;
7436        int rc;
7437
7438        if (BNXT_NO_FW_ACCESS(bp))
7439                return 0;
7440
7441        rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7442        if (rc)
7443                goto exit;
7444
7445        req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7446        req->ring_type = ring_type;
7447        req->ring_id = cpu_to_le16(ring->fw_ring_id);
7448
7449        resp = hwrm_req_hold(bp, req);
7450        rc = hwrm_req_send(bp, req);
7451        error_code = le16_to_cpu(resp->error_code);
7452        hwrm_req_drop(bp, req);
7453exit:
7454        if (rc || error_code) {
7455                netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7456                           ring_type, rc, error_code);
7457                return -EIO;
7458        }
7459        return 0;
7460}
7461
7462static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7463                                   struct bnxt_tx_ring_info *txr,
7464                                   bool close_path)
7465{
7466        struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7467        u32 cmpl_ring_id;
7468
7469        if (ring->fw_ring_id == INVALID_HW_RING_ID)
7470                return;
7471
7472        cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7473                       INVALID_HW_RING_ID;
7474        hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7475                                cmpl_ring_id);
7476        ring->fw_ring_id = INVALID_HW_RING_ID;
7477}
7478
7479static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7480                                   struct bnxt_rx_ring_info *rxr,
7481                                   bool close_path)
7482{
7483        struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7484        u32 grp_idx = rxr->bnapi->index;
7485        u32 cmpl_ring_id;
7486
7487        if (ring->fw_ring_id == INVALID_HW_RING_ID)
7488                return;
7489
7490        cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7491        hwrm_ring_free_send_msg(bp, ring,
7492                                RING_FREE_REQ_RING_TYPE_RX,
7493                                close_path ? cmpl_ring_id :
7494                                INVALID_HW_RING_ID);
7495        ring->fw_ring_id = INVALID_HW_RING_ID;
7496        bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7497}
7498
7499static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7500                                       struct bnxt_rx_ring_info *rxr,
7501                                       bool close_path)
7502{
7503        struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7504        u32 grp_idx = rxr->bnapi->index;
7505        u32 type, cmpl_ring_id;
7506
7507        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7508                type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7509        else
7510                type = RING_FREE_REQ_RING_TYPE_RX;
7511
7512        if (ring->fw_ring_id == INVALID_HW_RING_ID)
7513                return;
7514
7515        cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7516        hwrm_ring_free_send_msg(bp, ring, type,
7517                                close_path ? cmpl_ring_id :
7518                                INVALID_HW_RING_ID);
7519        ring->fw_ring_id = INVALID_HW_RING_ID;
7520        bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7521}
7522
7523static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7524                                   struct bnxt_cp_ring_info *cpr)
7525{
7526        struct bnxt_ring_struct *ring;
7527
7528        ring = &cpr->cp_ring_struct;
7529        if (ring->fw_ring_id == INVALID_HW_RING_ID)
7530                return;
7531
7532        hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7533                                INVALID_HW_RING_ID);
7534        ring->fw_ring_id = INVALID_HW_RING_ID;
7535}
7536
7537static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7538{
7539        struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7540        int i, size = ring->ring_mem.page_size;
7541
7542        cpr->cp_raw_cons = 0;
7543        cpr->toggle = 0;
7544
7545        for (i = 0; i < bp->cp_nr_pages; i++)
7546                if (cpr->cp_desc_ring[i])
7547                        memset(cpr->cp_desc_ring[i], 0, size);
7548}
7549
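/* Free all firmware rings, roughly in reverse order of allocation:
 * TX rings first, then RX and aggregation rings.  Interrupts are then
 * masked (the IRQ doorbells stop working once the completion rings are
 * gone) before the per-ring completion rings and the NQ/CMPL rings are
 * freed.
 */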
7550static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7551{
7552        u32 type;
7553        int i;
7554
7555        if (!bp->bnapi)
7556                return;
7557
7558        for (i = 0; i < bp->tx_nr_rings; i++)
7559                bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7560
7561        bnxt_cancel_dim(bp);
7562        for (i = 0; i < bp->rx_nr_rings; i++) {
7563                bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7564                bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7565        }
7566
7567        /* The completion rings are about to be freed.  After that the
7568         * IRQ doorbells will no longer work, so the IRQs must be
7569         * disabled here.
7570         */
7571        bnxt_disable_int_sync(bp);
7572
7573        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7574                type = RING_FREE_REQ_RING_TYPE_NQ;
7575        else
7576                type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7577        for (i = 0; i < bp->cp_nr_rings; i++) {
7578                struct bnxt_napi *bnapi = bp->bnapi[i];
7579                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7580                struct bnxt_ring_struct *ring;
7581                int j;
7582
7583                for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7584                        bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7585
7586                ring = &cpr->cp_ring_struct;
7587                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7588                        hwrm_ring_free_send_msg(bp, ring, type,
7589                                                INVALID_HW_RING_ID);
7590                        ring->fw_ring_id = INVALID_HW_RING_ID;
7591                        bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7592                }
7593        }
7594}
7595
7596static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7597                             bool shared);
7598static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7599                           bool shared);
7600
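/* Refresh bp->hw_resc with the resource counts firmware has actually
 * reserved for this function (HWRM_FUNC_QCFG).  On P5_PLUS chips the
 * RX/TX counts are trimmed if they exceed the reserved completion
 * rings, and the ring group count mirrors the RX ring count.
 */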
7601static int bnxt_hwrm_get_rings(struct bnxt *bp)
7602{
7603        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7604        struct hwrm_func_qcfg_output *resp;
7605        struct hwrm_func_qcfg_input *req;
7606        int rc;
7607
7608        if (bp->hwrm_spec_code < 0x10601)
7609                return 0;
7610
7611        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7612        if (rc)
7613                return rc;
7614
7615        req->fid = cpu_to_le16(0xffff);
7616        resp = hwrm_req_hold(bp, req);
7617        rc = hwrm_req_send(bp, req);
7618        if (rc) {
7619                hwrm_req_drop(bp, req);
7620                return rc;
7621        }
7622
7623        hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7624        if (BNXT_NEW_RM(bp)) {
7625                u16 cp, stats;
7626
7627                hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7628                hw_resc->resv_hw_ring_grps =
7629                        le32_to_cpu(resp->alloc_hw_ring_grps);
7630                hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7631                hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7632                cp = le16_to_cpu(resp->alloc_cmpl_rings);
7633                stats = le16_to_cpu(resp->alloc_stat_ctx);
7634                hw_resc->resv_irqs = cp;
7635                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7636                        int rx = hw_resc->resv_rx_rings;
7637                        int tx = hw_resc->resv_tx_rings;
7638
7639                        if (bp->flags & BNXT_FLAG_AGG_RINGS)
7640                                rx >>= 1;
7641                        if (cp < (rx + tx)) {
7642                                rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7643                                if (rc)
7644                                        goto get_rings_exit;
7645                                if (bp->flags & BNXT_FLAG_AGG_RINGS)
7646                                        rx <<= 1;
7647                                hw_resc->resv_rx_rings = rx;
7648                                hw_resc->resv_tx_rings = tx;
7649                        }
7650                        hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7651                        hw_resc->resv_hw_ring_grps = rx;
7652                }
7653                hw_resc->resv_cp_rings = cp;
7654                hw_resc->resv_stat_ctxs = stats;
7655        }
7656get_rings_exit:
7657        hwrm_req_drop(bp, req);
7658        return rc;
7659}
7660
7661int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7662{
7663        struct hwrm_func_qcfg_output *resp;
7664        struct hwrm_func_qcfg_input *req;
7665        int rc;
7666
7667        if (bp->hwrm_spec_code < 0x10601)
7668                return 0;
7669
7670        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7671        if (rc)
7672                return rc;
7673
7674        req->fid = cpu_to_le16(fid);
7675        resp = hwrm_req_hold(bp, req);
7676        rc = hwrm_req_send(bp, req);
7677        if (!rc)
7678                *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7679
7680        hwrm_req_drop(bp, req);
7681        return rc;
7682}
7683
7684static bool bnxt_rfs_supported(struct bnxt *bp);
7685
7686static struct hwrm_func_cfg_input *
7687__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7688{
7689        struct hwrm_func_cfg_input *req;
7690        u32 enables = 0;
7691
7692        if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7693                return NULL;
7694
7695        req->fid = cpu_to_le16(0xffff);
7696        enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7697        req->num_tx_rings = cpu_to_le16(hwr->tx);
7698        if (BNXT_NEW_RM(bp)) {
7699                enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7700                enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7701                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7702                        enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7703                        enables |= hwr->cp_p5 ?
7704                                   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7705                } else {
7706                        enables |= hwr->cp ?
7707                                   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7708                        enables |= hwr->grp ?
7709                                   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7710                }
7711                enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7712                enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7713                                          0;
7714                req->num_rx_rings = cpu_to_le16(hwr->rx);
7715                req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7716                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7717                        req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7718                        req->num_msix = cpu_to_le16(hwr->cp);
7719                } else {
7720                        req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7721                        req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7722                }
7723                req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7724                req->num_vnics = cpu_to_le16(hwr->vnic);
7725        }
7726        req->enables = cpu_to_le32(enables);
7727        return req;
7728}
7729
7730static struct hwrm_func_vf_cfg_input *
7731__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7732{
7733        struct hwrm_func_vf_cfg_input *req;
7734        u32 enables = 0;
7735
7736        if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7737                return NULL;
7738
7739        enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7740        enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7741                             FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7742        enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7743        enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7744        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7745                enables |= hwr->cp_p5 ?
7746                           FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7747        } else {
7748                enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7749                enables |= hwr->grp ?
7750                           FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7751        }
7752        enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7753        enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7754
7755        req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7756        req->num_tx_rings = cpu_to_le16(hwr->tx);
7757        req->num_rx_rings = cpu_to_le16(hwr->rx);
7758        req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7759        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7760                req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7761        } else {
7762                req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7763                req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7764        }
7765        req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7766        req->num_vnics = cpu_to_le16(hwr->vnic);
7767
7768        req->enables = cpu_to_le32(enables);
7769        return req;
7770}
7771
7772static int
7773bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7774{
7775        struct hwrm_func_cfg_input *req;
7776        int rc;
7777
7778        req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7779        if (!req)
7780                return -ENOMEM;
7781
7782        if (!req->enables) {
7783                hwrm_req_drop(bp, req);
7784                return 0;
7785        }
7786
7787        rc = hwrm_req_send(bp, req);
7788        if (rc)
7789                return rc;
7790
7791        if (bp->hwrm_spec_code < 0x10601)
7792                bp->hw_resc.resv_tx_rings = hwr->tx;
7793
7794        return bnxt_hwrm_get_rings(bp);
7795}
7796
7797static int
7798bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7799{
7800        struct hwrm_func_vf_cfg_input *req;
7801        int rc;
7802
7803        if (!BNXT_NEW_RM(bp)) {
7804                bp->hw_resc.resv_tx_rings = hwr->tx;
7805                return 0;
7806        }
7807
7808        req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7809        if (!req)
7810                return -ENOMEM;
7811
7812        rc = hwrm_req_send(bp, req);
7813        if (rc)
7814                return rc;
7815
7816        return bnxt_hwrm_get_rings(bp);
7817}
7818
7819static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7820{
7821        if (BNXT_PF(bp))
7822                return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7823        else
7824                return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7825}
7826
7827int bnxt_nq_rings_in_use(struct bnxt *bp)
7828{
7829        return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7830}
7831
7832static int bnxt_cp_rings_in_use(struct bnxt *bp)
7833{
7834        int cp;
7835
7836        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7837                return bnxt_nq_rings_in_use(bp);
7838
7839        cp = bp->tx_nr_rings + bp->rx_nr_rings;
7840        return cp;
7841}
7842
7843static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7844{
7845        return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7846}
7847
7848static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7849{
7850        if (!hwr->grp)
7851                return 0;
7852        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7853                int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7854
7855                if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7856                        rss_ctx *= hwr->vnic;
7857                return rss_ctx;
7858        }
7859        if (BNXT_VF(bp))
7860                return BNXT_VF_MAX_RSS_CTX;
7861        if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7862                return hwr->grp + 1;
7863        return 1;
7864}
7865
7866/* Check if a default RSS map needs to be set up.  This function is only
7867 * used on older firmware that does not require reserving RX rings.
7868 */
7869static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7870{
7871        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7872
7873        /* The current RSS map is only valid while the RX ring count matches resv_rx_rings */
7874        if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7875                hw_resc->resv_rx_rings = bp->rx_nr_rings;
7876                if (!netif_is_rxfh_configured(bp->dev))
7877                        bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7878        }
7879}
7880
7881static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7882{
7883        if (bp->flags & BNXT_FLAG_RFS) {
7884                if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7885                        return 2 + bp->num_rss_ctx;
7886                if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7887                        return rx_rings + 1;
7888        }
7889        return 1;
7890}
7891
7892static bool bnxt_need_reserve_rings(struct bnxt *bp)
7893{
7894        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7895        int cp = bnxt_cp_rings_in_use(bp);
7896        int nq = bnxt_nq_rings_in_use(bp);
7897        int rx = bp->rx_nr_rings, stat;
7898        int vnic, grp = rx;
7899
7900        /* Old firmware does not need RX ring reservations but we still
7901         * need to set up a default RSS map when needed.  With new firmware
7902         * we go through RX ring reservations first and then set up the
7903         * RSS map for the successfully reserved RX rings when needed.
7904         */
7905        if (!BNXT_NEW_RM(bp))
7906                bnxt_check_rss_tbl_no_rmgr(bp);
7907
7908        if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7909            bp->hwrm_spec_code >= 0x10601)
7910                return true;
7911
7912        if (!BNXT_NEW_RM(bp))
7913                return false;
7914
7915        vnic = bnxt_get_total_vnics(bp, rx);
7916
7917        if (bp->flags & BNXT_FLAG_AGG_RINGS)
7918                rx <<= 1;
7919        stat = bnxt_get_func_stat_ctxs(bp);
7920        if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7921            hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7922            (hw_resc->resv_hw_ring_grps != grp &&
7923             !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7924                return true;
7925        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7926            hw_resc->resv_irqs != nq)
7927                return true;
7928        return false;
7929}
7930
7931static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7932{
7933        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7934
7935        hwr->tx = hw_resc->resv_tx_rings;
7936        if (BNXT_NEW_RM(bp)) {
7937                hwr->rx = hw_resc->resv_rx_rings;
7938                hwr->cp = hw_resc->resv_irqs;
7939                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7940                        hwr->cp_p5 = hw_resc->resv_cp_rings;
7941                hwr->grp = hw_resc->resv_hw_ring_grps;
7942                hwr->vnic = hw_resc->resv_vnics;
7943                hwr->stat = hw_resc->resv_stat_ctxs;
7944                hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7945        }
7946}
7947
7948static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7949{
7950        return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7951               hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7952}
7953
7954static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7955
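/* Reserve all ring resources with firmware based on the current ring
 * counts, then reconcile driver state with what was actually granted:
 * shrink the TX/RX/completion counts if needed, fall back to
 * non-aggregation mode (dropping LRO) when fewer than two RX slots
 * remain, reset the RSS indirection table if the reserved RX count no
 * longer matches it, and hand any leftover MSI-X vectors and stat
 * contexts to the ULP driver.
 */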
7956static int __bnxt_reserve_rings(struct bnxt *bp)
7957{
7958        struct bnxt_hw_rings hwr = {0};
7959        int rx_rings, old_rx_rings, rc;
7960        int cp = bp->cp_nr_rings;
7961        int ulp_msix = 0;
7962        bool sh = false;
7963        int tx_cp;
7964
7965        if (!bnxt_need_reserve_rings(bp))
7966                return 0;
7967
7968        if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7969                ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7970                if (!ulp_msix)
7971                        bnxt_set_ulp_stat_ctxs(bp, 0);
7972
7973                if (ulp_msix > bp->ulp_num_msix_want)
7974                        ulp_msix = bp->ulp_num_msix_want;
7975                hwr.cp = cp + ulp_msix;
7976        } else {
7977                hwr.cp = bnxt_nq_rings_in_use(bp);
7978        }
7979
7980        hwr.tx = bp->tx_nr_rings;
7981        hwr.rx = bp->rx_nr_rings;
7982        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7983                sh = true;
7984        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7985                hwr.cp_p5 = hwr.rx + hwr.tx;
7986
7987        hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7988
7989        if (bp->flags & BNXT_FLAG_AGG_RINGS)
7990                hwr.rx <<= 1;
7991        hwr.grp = bp->rx_nr_rings;
7992        hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7993        hwr.stat = bnxt_get_func_stat_ctxs(bp);
7994        old_rx_rings = bp->hw_resc.resv_rx_rings;
7995
7996        rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7997        if (rc)
7998                return rc;
7999
8000        bnxt_copy_reserved_rings(bp, &hwr);
8001
8002        rx_rings = hwr.rx;
8003        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8004                if (hwr.rx >= 2) {
8005                        rx_rings = hwr.rx >> 1;
8006                } else {
8007                        if (netif_running(bp->dev))
8008                                return -ENOMEM;
8009
8010                        bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8011                        bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8012                        bp->dev->hw_features &= ~NETIF_F_LRO;
8013                        bp->dev->features &= ~NETIF_F_LRO;
8014                        bnxt_set_ring_params(bp);
8015                }
8016        }
8017        rx_rings = min_t(int, rx_rings, hwr.grp);
8018        hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8019        if (bnxt_ulp_registered(bp->edev) &&
8020            hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8021                hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8022        hwr.cp = min_t(int, hwr.cp, hwr.stat);
8023        rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8024        if (bp->flags & BNXT_FLAG_AGG_RINGS)
8025                hwr.rx = rx_rings << 1;
8026        tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8027        hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8028        if (hwr.tx != bp->tx_nr_rings) {
8029                netdev_warn(bp->dev,
8030                            "Able to reserve only %d out of %d requested TX rings\n",
8031                            hwr.tx, bp->tx_nr_rings);
8032        }
8033        bp->tx_nr_rings = hwr.tx;
8034
8035        /* If we cannot reserve all the RX rings, reset the RSS map only
8036         * if absolutely necessary.
8037         */
8038        if (rx_rings != bp->rx_nr_rings) {
8039                netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8040                            rx_rings, bp->rx_nr_rings);
8041                if (netif_is_rxfh_configured(bp->dev) &&
8042                    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8043                     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8044                     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8045                        netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8046                        bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8047                }
8048        }
8049        bp->rx_nr_rings = rx_rings;
8050        bp->cp_nr_rings = hwr.cp;
8051
8052        if (!bnxt_rings_ok(bp, &hwr))
8053                return -ENOMEM;
8054
8055        if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8056            !netif_is_rxfh_configured(bp->dev))
8057                bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8058
8059        if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8060                int resv_msix, resv_ctx, ulp_ctxs;
8061                struct bnxt_hw_resc *hw_resc;
8062
8063                hw_resc = &bp->hw_resc;
8064                resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8065                ulp_msix = min_t(int, resv_msix, ulp_msix);
8066                bnxt_set_ulp_msix_num(bp, ulp_msix);
8067                resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8068                ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8069                bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8070        }
8071
8072        return rc;
8073}
8074
8075static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8076{
8077        struct hwrm_func_vf_cfg_input *req;
8078        u32 flags;
8079
8080        if (!BNXT_NEW_RM(bp))
8081                return 0;
8082
8083        req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8084        flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8085                FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8086                FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8087                FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8088                FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8089                FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8090        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8091                flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8092
8093        req->flags = cpu_to_le32(flags);
8094        return hwrm_req_send_silent(bp, req);
8095}
8096
8097static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8098{
8099        struct hwrm_func_cfg_input *req;
8100        u32 flags;
8101
8102        req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8103        flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8104        if (BNXT_NEW_RM(bp)) {
8105                flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8106                         FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8107                         FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8108                         FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8109                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8110                        flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8111                                 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8112                else
8113                        flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8114        }
8115
8116        req->flags = cpu_to_le32(flags);
8117        return hwrm_req_send_silent(bp, req);
8118}
8119
8120static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8121{
8122        if (bp->hwrm_spec_code < 0x10801)
8123                return 0;
8124
8125        if (BNXT_PF(bp))
8126                return bnxt_hwrm_check_pf_rings(bp, hwr);
8127
8128        return bnxt_hwrm_check_vf_rings(bp, hwr);
8129}
8130
8131static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8132{
8133        struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8134        struct hwrm_ring_aggint_qcaps_output *resp;
8135        struct hwrm_ring_aggint_qcaps_input *req;
8136        int rc;
8137
8138        coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8139        coal_cap->num_cmpl_dma_aggr_max = 63;
8140        coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8141        coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8142        coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8143        coal_cap->int_lat_tmr_min_max = 65535;
8144        coal_cap->int_lat_tmr_max_max = 65535;
8145        coal_cap->num_cmpl_aggr_int_max = 65535;
8146        coal_cap->timer_units = 80;
8147
8148        if (bp->hwrm_spec_code < 0x10902)
8149                return;
8150
8151        if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8152                return;
8153
8154        resp = hwrm_req_hold(bp, req);
8155        rc = hwrm_req_send_silent(bp, req);
8156        if (!rc) {
8157                coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8158                coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8159                coal_cap->num_cmpl_dma_aggr_max =
8160                        le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8161                coal_cap->num_cmpl_dma_aggr_during_int_max =
8162                        le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8163                coal_cap->cmpl_aggr_dma_tmr_max =
8164                        le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8165                coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8166                        le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8167                coal_cap->int_lat_tmr_min_max =
8168                        le16_to_cpu(resp->int_lat_tmr_min_max);
8169                coal_cap->int_lat_tmr_max_max =
8170                        le16_to_cpu(resp->int_lat_tmr_max_max);
8171                coal_cap->num_cmpl_aggr_int_max =
8172                        le16_to_cpu(resp->num_cmpl_aggr_int_max);
8173                coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8174        }
8175        hwrm_req_drop(bp, req);
8176}
8177
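/* Convert a coalescing time in usec to device timer ticks.  timer_units
 * is effectively the tick period in nanoseconds (80 ns by default, per
 * the legacy defaults above), so e.g. 25 usec -> 25 * 1000 / 80 =
 * 312 ticks with integer division.
 */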
8178static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8179{
8180        struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8181
8182        return usec * 1000 / coal_cap->timer_units;
8183}
8184
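/* Translate a bnxt_coal configuration into a
 * HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping every value
 * to the limits discovered in bnxt_hwrm_coal_params_qcaps().  The
 * derived timers follow fixed ratios: the min latency timer is half of
 * the max timer, and the DMA buffer timer is a quarter of it.
 */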
8185static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8186        struct bnxt_coal *hw_coal,
8187        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8188{
8189        struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8190        u16 val, tmr, max, flags = hw_coal->flags;
8191        u32 cmpl_params = coal_cap->cmpl_params;
8192
8193        max = hw_coal->bufs_per_record * 128;
8194        if (hw_coal->budget)
8195                max = hw_coal->bufs_per_record * hw_coal->budget;
8196        max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8197
8198        val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8199        req->num_cmpl_aggr_int = cpu_to_le16(val);
8200
8201        val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8202        req->num_cmpl_dma_aggr = cpu_to_le16(val);
8203
8204        val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8205                      coal_cap->num_cmpl_dma_aggr_during_int_max);
8206        req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8207
8208        tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8209        tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8210        req->int_lat_tmr_max = cpu_to_le16(tmr);
8211
8212        /* min timer set to 1/2 of interrupt timer */
8213        if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8214                val = tmr / 2;
8215                val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8216                req->int_lat_tmr_min = cpu_to_le16(val);
8217                req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8218        }
8219
8220        /* buf timer set to 1/4 of interrupt timer */
8221        val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8222        req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8223
8224        if (cmpl_params &
8225            RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8226                tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8227                val = clamp_t(u16, tmr, 1,
8228                              coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8229                req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8230                req->enables |=
8231                        cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8232        }
8233
8234        if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8235            hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8236                flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8237        req->flags = cpu_to_le16(flags);
8238        req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8239}
8240
8241static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8242                                   struct bnxt_coal *hw_coal)
8243{
8244        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8245        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8246        struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8247        u32 nq_params = coal_cap->nq_params;
8248        u16 tmr;
8249        int rc;
8250
8251        if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8252                return 0;
8253
8254        rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8255        if (rc)
8256                return rc;
8257
8258        req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8259        req->flags =
8260                cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8261
8262        tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8263        tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8264        req->int_lat_tmr_min = cpu_to_le16(tmr);
8265        req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8266        return hwrm_req_send(bp, req);
8267}
8268
8269int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8270{
8271        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8272        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8273        struct bnxt_coal coal;
8274        int rc;
8275
8276        /* Tick values in microseconds.
8277         * 1 coal_buf x bufs_per_record = 1 completion record.
8278         */
8279        memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8280
8281        coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8282        coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8283
8284        if (!bnapi->rx_ring)
8285                return -ENODEV;
8286
8287        rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8288        if (rc)
8289                return rc;
8290
8291        bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8292
8293        req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8294
8295        return hwrm_req_send(bp, req_rx);
8296}
8297
8298static int
8299bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8300                      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8301{
8302        u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8303
8304        req->ring_id = cpu_to_le16(ring_id);
8305        return hwrm_req_send(bp, req);
8306}
8307
8308static int
8309bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8310                      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8311{
8312        struct bnxt_tx_ring_info *txr;
8313        int i, rc;
8314
8315        bnxt_for_each_napi_tx(i, bnapi, txr) {
8316                u16 ring_id;
8317
8318                ring_id = bnxt_cp_ring_for_tx(bp, txr);
8319                req->ring_id = cpu_to_le16(ring_id);
8320                rc = hwrm_req_send(bp, req);
8321                if (rc)
8322                        return rc;
8323                if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8324                        return 0;
8325        }
8326        return 0;
8327}
8328
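/* Apply the global RX and TX coalescing settings to every ring.  One
 * request of each kind is built and held, then re-sent per ring with
 * only the ring id updated.  On P5_PLUS chips, a NAPI with both RX and
 * TX rings gets the TX parameters applied as well, and the NQ's min
 * timer is programmed from whichever of rx_coal/tx_coal matches it.
 */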
8329int bnxt_hwrm_set_coal(struct bnxt *bp)
8330{
8331        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8332        int i, rc;
8333
8334        rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8335        if (rc)
8336                return rc;
8337
8338        rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8339        if (rc) {
8340                hwrm_req_drop(bp, req_rx);
8341                return rc;
8342        }
8343
8344        bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8345        bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8346
8347        hwrm_req_hold(bp, req_rx);
8348        hwrm_req_hold(bp, req_tx);
8349        for (i = 0; i < bp->cp_nr_rings; i++) {
8350                struct bnxt_napi *bnapi = bp->bnapi[i];
8351                struct bnxt_coal *hw_coal;
8352
8353                if (!bnapi->rx_ring)
8354                        rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8355                else
8356                        rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8357                if (rc)
8358                        break;
8359
8360                if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8361                        continue;
8362
8363                if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8364                        rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8365                        if (rc)
8366                                break;
8367                }
8368                if (bnapi->rx_ring)
8369                        hw_coal = &bp->rx_coal;
8370                else
8371                        hw_coal = &bp->tx_coal;
8372                __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8373        }
8374        hwrm_req_drop(bp, req_rx);
8375        hwrm_req_drop(bp, req_tx);
8376        return rc;
8377}
8378
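/* Free all firmware statistics contexts.  On firmware with major version
 * <= 20, each context's counters are cleared with STAT_CTX_CLR_STATS
 * before the context is freed.
 */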
8379static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8380{
8381        struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8382        struct hwrm_stat_ctx_free_input *req;
8383        int i;
8384
8385        if (!bp->bnapi)
8386                return;
8387
8388        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8389                return;
8390
8391        if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8392                return;
8393        if (BNXT_FW_MAJ(bp) <= 20) {
8394                if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8395                        hwrm_req_drop(bp, req);
8396                        return;
8397                }
8398                hwrm_req_hold(bp, req0);
8399        }
8400        hwrm_req_hold(bp, req);
8401        for (i = 0; i < bp->cp_nr_rings; i++) {
8402                struct bnxt_napi *bnapi = bp->bnapi[i];
8403                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8404
8405                if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8406                        req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8407                        if (req0) {
8408                                req0->stat_ctx_id = req->stat_ctx_id;
8409                                hwrm_req_send(bp, req0);
8410                        }
8411                        hwrm_req_send(bp, req);
8412
8413                        cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8414                }
8415        }
8416        hwrm_req_drop(bp, req);
8417        if (req0)
8418                hwrm_req_drop(bp, req0);
8419}
8420
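/* Allocate a firmware statistics context for each completion ring and
 * record the returned context ID in the ring and group info.
 */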
8421static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8422{
8423        struct hwrm_stat_ctx_alloc_output *resp;
8424        struct hwrm_stat_ctx_alloc_input *req;
8425        int rc, i;
8426
8427        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8428                return 0;
8429
8430        rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8431        if (rc)
8432                return rc;
8433
8434        req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8435        req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8436
8437        resp = hwrm_req_hold(bp, req);
8438        for (i = 0; i < bp->cp_nr_rings; i++) {
8439                struct bnxt_napi *bnapi = bp->bnapi[i];
8440                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8441
8442                req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8443
8444                rc = hwrm_req_send(bp, req);
8445                if (rc)
8446                        break;
8447
8448                cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8449
8450                bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8451        }
8452        hwrm_req_drop(bp, req);
8453        return rc;
8454}
8455
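/* Query the current function configuration and cache items such as the
 * VF VLAN, firmware agent capabilities, bridge (EVB) mode, maximum MTU,
 * and doorbell BAR size and offset.
 */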
8456static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8457{
8458        struct hwrm_func_qcfg_output *resp;
8459        struct hwrm_func_qcfg_input *req;
8460        u16 flags;
8461        int rc;
8462
8463        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8464        if (rc)
8465                return rc;
8466
8467        req->fid = cpu_to_le16(0xffff);
8468        resp = hwrm_req_hold(bp, req);
8469        rc = hwrm_req_send(bp, req);
8470        if (rc)
8471                goto func_qcfg_exit;
8472
8473        flags = le16_to_cpu(resp->flags);
8474#ifdef CONFIG_BNXT_SRIOV
8475        if (BNXT_VF(bp)) {
8476                struct bnxt_vf_info *vf = &bp->vf;
8477
8478                vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8479                if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8480                        vf->flags |= BNXT_VF_TRUST;
8481                else
8482                        vf->flags &= ~BNXT_VF_TRUST;
8483        } else {
8484                bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8485        }
8486#endif
8487        if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8488                     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8489                bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8490                if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8491                        bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8492        }
8493        if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8494                bp->flags |= BNXT_FLAG_MULTI_HOST;
8495
8496        if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8497                bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8498
8499        if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8500                bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8501
8502        switch (resp->port_partition_type) {
8503        case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8504        case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8505        case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8506        case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8507                bp->port_partition_type = resp->port_partition_type;
8508                break;
8509        }
8510        if (bp->hwrm_spec_code < 0x10707 ||
8511            resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8512                bp->br_mode = BRIDGE_MODE_VEB;
8513        else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8514                bp->br_mode = BRIDGE_MODE_VEPA;
8515        else
8516                bp->br_mode = BRIDGE_MODE_UNDEF;
8517
8518        bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8519        if (!bp->max_mtu)
8520                bp->max_mtu = BNXT_MAX_MTU;
8521
8522        if (bp->db_size)
8523                goto func_qcfg_exit;
8524
8525        bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8526        if (BNXT_CHIP_P5(bp)) {
8527                if (BNXT_PF(bp))
8528                        bp->db_offset = DB_PF_OFFSET_P5;
8529                else
8530                        bp->db_offset = DB_VF_OFFSET_P5;
8531        }
8532        bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8533                                 1024);
8534        if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8535            bp->db_size <= bp->db_offset)
8536                bp->db_size = pci_resource_len(bp->pdev, 2);
8537
8538func_qcfg_exit:
8539        hwrm_req_drop(bp, req);
8540        return rc;
8541}
8542
8543static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8544                                      u8 init_val, u8 init_offset,
8545                                      bool init_mask_set)
8546{
8547        ctxm->init_value = init_val;
8548        ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8549        if (init_mask_set)
8550                ctxm->init_offset = init_offset * 4;
8551        else
8552                ctxm->init_value = 0;
8553}
8554
8555static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8556{
8557        struct bnxt_ctx_mem_info *ctx = bp->ctx;
8558        u16 type;
8559
8560        for (type = 0; type < ctx_max; type++) {
8561                struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8562                int n = 1;
8563
8564                if (!ctxm->max_entries || ctxm->pg_info)
8565                        continue;
8566
8567                if (ctxm->instance_bmap)
8568                        n = hweight32(ctxm->instance_bmap);
8569                ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8570                if (!ctxm->pg_info)
8571                        return -ENOMEM;
8572        }
8573        return 0;
8574}
8575
8576static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8577                                  struct bnxt_ctx_mem_type *ctxm, bool force);
8578
8579#define BNXT_CTX_INIT_VALID(flags)      \
8580        (!!((flags) &                   \
8581            FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8582
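/* Walk the context memory types reported by the V2 backing store QCAPS
 * firmware call and record the entry size, entry counts, initializer and
 * split entry parameters for each valid type.
 */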
8583static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8584{
8585        struct hwrm_func_backing_store_qcaps_v2_output *resp;
8586        struct hwrm_func_backing_store_qcaps_v2_input *req;
8587        struct bnxt_ctx_mem_info *ctx = bp->ctx;
8588        u16 type;
8589        int rc;
8590
8591        rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8592        if (rc)
8593                return rc;
8594
8595        if (!ctx) {
8596                ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8597                if (!ctx) {
8598                        rc = -ENOMEM;
                            goto ctx_done;
                    }
8599                bp->ctx = ctx;
8600        }
8601
8602        resp = hwrm_req_hold(bp, req);
8603
8604        for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8605                struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8606                u8 init_val, init_off, i;
8607                u32 max_entries;
8608                u16 entry_size;
8609                __le32 *p;
8610                u32 flags;
8611
8612                req->type = cpu_to_le16(type);
8613                rc = hwrm_req_send(bp, req);
8614                if (rc)
8615                        goto ctx_done;
8616                flags = le32_to_cpu(resp->flags);
8617                type = le16_to_cpu(resp->next_valid_type);
8618                if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8619                        bnxt_free_one_ctx_mem(bp, ctxm, true);
8620                        continue;
8621                }
8622                entry_size = le16_to_cpu(resp->entry_size);
8623                max_entries = le32_to_cpu(resp->max_num_entries);
8624                if (ctxm->mem_valid) {
8625                        if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8626                            ctxm->entry_size != entry_size ||
8627                            ctxm->max_entries != max_entries)
8628                                bnxt_free_one_ctx_mem(bp, ctxm, true);
8629                        else
8630                                continue;
8631                }
8632                ctxm->type = le16_to_cpu(resp->type);
8633                ctxm->entry_size = entry_size;
8634                ctxm->flags = flags;
8635                ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8636                ctxm->entry_multiple = resp->entry_multiple;
8637                ctxm->max_entries = max_entries;
8638                ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8639                init_val = resp->ctx_init_value;
8640                init_off = resp->ctx_init_offset;
8641                bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8642                                          BNXT_CTX_INIT_VALID(flags));
8643                ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8644                                              BNXT_MAX_SPLIT_ENTRY);
8645                for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8646                     i++, p++)
8647                        ctxm->split[i] = le32_to_cpu(*p);
8648        }
8649        rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8650
8651ctx_done:
8652        hwrm_req_drop(bp, req);
8653        return rc;
8654}
8655
8656static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8657{
8658        struct hwrm_func_backing_store_qcaps_output *resp;
8659        struct hwrm_func_backing_store_qcaps_input *req;
8660        int rc;
8661
8662        if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8663            (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8664                return 0;
8665
8666        if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8667                return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8668
8669        rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8670        if (rc)
8671                return rc;
8672
8673        resp = hwrm_req_hold(bp, req);
8674        rc = hwrm_req_send_silent(bp, req);
8675        if (!rc) {
8676                struct bnxt_ctx_mem_type *ctxm;
8677                struct bnxt_ctx_mem_info *ctx;
8678                u8 init_val, init_idx = 0;
8679                u16 init_mask;
8680
8681                ctx = bp->ctx;
8682                if (!ctx) {
8683                        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8684                        if (!ctx) {
8685                                rc = -ENOMEM;
8686                                goto ctx_err;
8687                        }
8688                        bp->ctx = ctx;
8689                }
8690                init_val = resp->ctx_kind_initializer;
8691                init_mask = le16_to_cpu(resp->ctx_init_mask);
8692
8693                ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8694                ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8695                ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8696                ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8697                ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8698                ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8699                bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8700                                          (init_mask & (1 << init_idx++)) != 0);
8701
8702                ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8703                ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8704                ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8705                ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8706                bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8707                                          (init_mask & (1 << init_idx++)) != 0);
8708
8709                ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8710                ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8711                ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8712                ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8713                bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8714                                          (init_mask & (1 << init_idx++)) != 0);
8715
8716                ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8717                ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8718                ctxm->max_entries = ctxm->vnic_entries +
8719                        le16_to_cpu(resp->vnic_max_ring_table_entries);
8720                ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8721                bnxt_init_ctx_initializer(ctxm, init_val,
8722                                          resp->vnic_init_offset,
8723                                          (init_mask & (1 << init_idx++)) != 0);
8724
8725                ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8726                ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8727                ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8728                bnxt_init_ctx_initializer(ctxm, init_val,
8729                                          resp->stat_init_offset,
8730                                          (init_mask & (1 << init_idx++)) != 0);
8731
8732                ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8733                ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8734                ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8735                ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8736                ctxm->entry_multiple = resp->tqm_entries_multiple;
8737                if (!ctxm->entry_multiple)
8738                        ctxm->entry_multiple = 1;
8739
8740                memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8741
8742                ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8743                ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8744                ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8745                ctxm->mrav_num_entries_units =
8746                        le16_to_cpu(resp->mrav_num_entries_units);
8747                bnxt_init_ctx_initializer(ctxm, init_val,
8748                                          resp->mrav_init_offset,
8749                                          (init_mask & (1 << init_idx++)) != 0);
8750
8751                ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8752                ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8753                ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8754
8755                ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8756                if (!ctx->tqm_fp_rings_count)
8757                        ctx->tqm_fp_rings_count = bp->max_q;
8758                else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8759                        ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8760
8761                ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8762                memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8763                ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8764
8765                rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8766        } else {
8767                rc = 0;
8768        }
8769ctx_err:
8770        hwrm_req_drop(bp, req);
8771        return rc;
8772}
8773
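/* Encode the page size and indirection level into *pg_attr and point
 * *pg_dir at the page table (or at the single data page for flat rings).
 */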
8774static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8775                                  __le64 *pg_dir)
8776{
8777        if (!rmem->nr_pages)
8778                return;
8779
8780        BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8781        if (rmem->depth >= 1) {
8782                if (rmem->depth == 2)
8783                        *pg_attr |= 2;
8784                else
8785                        *pg_attr |= 1;
8786                *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8787        } else {
8788                *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8789        }
8790}
8791
8792#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
8793        (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
8794         FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
8795         FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
8796         FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
8797         FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8798
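/* Configure all enabled context memory types with a single legacy
 * FUNC_BACKING_STORE_CFG request, filling in the entry counts, page
 * attributes, and page directories for each type.
 */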
8799static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8800{
8801        struct hwrm_func_backing_store_cfg_input *req;
8802        struct bnxt_ctx_mem_info *ctx = bp->ctx;
8803        struct bnxt_ctx_pg_info *ctx_pg;
8804        struct bnxt_ctx_mem_type *ctxm;
8805        void **__req = (void **)&req;
8806        u32 req_len = sizeof(*req);
8807        __le32 *num_entries;
8808        __le64 *pg_dir;
8809        u32 flags = 0;
8810        u8 *pg_attr;
8811        u32 ena;
8812        int rc;
8813        int i;
8814
8815        if (!ctx)
8816                return 0;
8817
8818        if (req_len > bp->hwrm_max_ext_req_len)
8819                req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8820        rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8821        if (rc)
8822                return rc;
8823
8824        req->enables = cpu_to_le32(enables);
8825        if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8826                ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8827                ctx_pg = ctxm->pg_info;
8828                req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8829                req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8830                req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8831                req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8832                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8833                                      &req->qpc_pg_size_qpc_lvl,
8834                                      &req->qpc_page_dir);
8835
8836                if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8837                        req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8838        }
8839        if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8840                ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8841                ctx_pg = ctxm->pg_info;
8842                req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8843                req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8844                req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8845                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8846                                      &req->srq_pg_size_srq_lvl,
8847                                      &req->srq_page_dir);
8848        }
8849        if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8850                ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8851                ctx_pg = ctxm->pg_info;
8852                req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8853                req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8854                req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8855                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8856                                      &req->cq_pg_size_cq_lvl,
8857                                      &req->cq_page_dir);
8858        }
8859        if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8860                ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8861                ctx_pg = ctxm->pg_info;
8862                req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8863                req->vnic_num_ring_table_entries =
8864                        cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8865                req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8866                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8867                                      &req->vnic_pg_size_vnic_lvl,
8868                                      &req->vnic_page_dir);
8869        }
8870        if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8871                ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8872                ctx_pg = ctxm->pg_info;
8873                req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8874                req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8875                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8876                                      &req->stat_pg_size_stat_lvl,
8877                                      &req->stat_page_dir);
8878        }
8879        if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8880                u32 units;
8881
8882                ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8883                ctx_pg = ctxm->pg_info;
8884                req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8885                units = ctxm->mrav_num_entries_units;
8886                if (units) {
8887                        u32 num_mr, num_ah = ctxm->mrav_av_entries;
8888                        u32 entries;
8889
8890                        num_mr = ctx_pg->entries - num_ah;
8891                        entries = ((num_mr / units) << 16) | (num_ah / units);
8892                        req->mrav_num_entries = cpu_to_le32(entries);
8893                        flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8894                }
8895                req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8896                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8897                                      &req->mrav_pg_size_mrav_lvl,
8898                                      &req->mrav_page_dir);
8899        }
8900        if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8901                ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8902                ctx_pg = ctxm->pg_info;
8903                req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8904                req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8905                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8906                                      &req->tim_pg_size_tim_lvl,
8907                                      &req->tim_page_dir);
8908        }
8909        ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8910        for (i = 0, num_entries = &req->tqm_sp_num_entries,
8911             pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8912             pg_dir = &req->tqm_sp_page_dir,
8913             ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8914             ctx_pg = ctxm->pg_info;
8915             i < BNXT_MAX_TQM_RINGS;
8916             ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8917             i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8918                if (!(enables & ena))
8919                        continue;
8920
8921                req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8922                *num_entries = cpu_to_le32(ctx_pg->entries);
8923                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8924        }
8925        req->flags = cpu_to_le32(flags);
8926        return hwrm_req_send(bp, req);
8927}
8928
8929static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8930                                  struct bnxt_ctx_pg_info *ctx_pg)
8931{
8932        struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8933
8934        rmem->page_size = BNXT_PAGE_SIZE;
8935        rmem->pg_arr = ctx_pg->ctx_pg_arr;
8936        rmem->dma_arr = ctx_pg->ctx_dma_arr;
8937        rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8938        if (rmem->depth >= 1)
8939                rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8940        return bnxt_alloc_ring(bp, rmem);
8941}
8942
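/* Allocate the pages backing one context memory region.  Regions larger
 * than MAX_CTX_PAGES pages (or with a requested depth above 1) use a
 * two-level page table; smaller regions use at most one level of
 * indirection.
 */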
8943static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8944                                  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8945                                  u8 depth, struct bnxt_ctx_mem_type *ctxm)
8946{
8947        struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8948        int rc;
8949
8950        if (!mem_size)
8951                return -EINVAL;
8952
8953        ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8954        if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8955                ctx_pg->nr_pages = 0;
8956                return -EINVAL;
8957        }
8958        if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8959                int nr_tbls, i;
8960
8961                rmem->depth = 2;
8962                ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8963                                             GFP_KERNEL);
8964                if (!ctx_pg->ctx_pg_tbl)
8965                        return -ENOMEM;
8966                nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8967                rmem->nr_pages = nr_tbls;
8968                rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8969                if (rc)
8970                        return rc;
8971                for (i = 0; i < nr_tbls; i++) {
8972                        struct bnxt_ctx_pg_info *pg_tbl;
8973
8974                        pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8975                        if (!pg_tbl)
8976                                return -ENOMEM;
8977                        ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8978                        rmem = &pg_tbl->ring_mem;
8979                        rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8980                        rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8981                        rmem->depth = 1;
8982                        rmem->nr_pages = MAX_CTX_PAGES;
8983                        rmem->ctx_mem = ctxm;
8984                        if (i == (nr_tbls - 1)) {
8985                                int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8986
8987                                if (rem)
8988                                        rmem->nr_pages = rem;
8989                        }
8990                        rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8991                        if (rc)
8992                                break;
8993                }
8994        } else {
8995                rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8996                if (rmem->nr_pages > 1 || depth)
8997                        rmem->depth = 1;
8998                rmem->ctx_mem = ctxm;
8999                rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9000        }
9001        return rc;
9002}
9003
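/* Copy the circular range [head, tail) of one context memory region into
 * buf, descending into the second-level page tables when depth > 1.
 */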
9004static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9005                                    struct bnxt_ctx_pg_info *ctx_pg,
9006                                    void *buf, size_t offset, size_t head,
9007                                    size_t tail)
9008{
9009        struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9010        size_t nr_pages = ctx_pg->nr_pages;
9011        int page_size = rmem->page_size;
9012        size_t len = 0, total_len = 0;
9013        u16 depth = rmem->depth;
9014
9015        tail %= nr_pages * page_size;
9016        do {
9017                if (depth > 1) {
9018                        int i = head / (page_size * MAX_CTX_PAGES);
9019                        struct bnxt_ctx_pg_info *pg_tbl;
9020
9021                        pg_tbl = ctx_pg->ctx_pg_tbl[i];
9022                        rmem = &pg_tbl->ring_mem;
9023                }
9024                len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9025                head += len;
9026                offset += len;
9027                total_len += len;
9028                if (head >= nr_pages * page_size)
9029                        head = 0;
9030        } while (head != tail);
9031        return total_len;
9032}
9033
9034static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9035                                  struct bnxt_ctx_pg_info *ctx_pg)
9036{
9037        struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9038
9039        if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9040            ctx_pg->ctx_pg_tbl) {
9041                int i, nr_tbls = rmem->nr_pages;
9042
9043                for (i = 0; i < nr_tbls; i++) {
9044                        struct bnxt_ctx_pg_info *pg_tbl;
9045                        struct bnxt_ring_mem_info *rmem2;
9046
9047                        pg_tbl = ctx_pg->ctx_pg_tbl[i];
9048                        if (!pg_tbl)
9049                                continue;
9050                        rmem2 = &pg_tbl->ring_mem;
9051                        bnxt_free_ring(bp, rmem2);
9052                        ctx_pg->ctx_pg_arr[i] = NULL;
9053                        kfree(pg_tbl);
9054                        ctx_pg->ctx_pg_tbl[i] = NULL;
9055                }
9056                kfree(ctx_pg->ctx_pg_tbl);
9057                ctx_pg->ctx_pg_tbl = NULL;
9058        }
9059        bnxt_free_ring(bp, rmem);
9060        ctx_pg->nr_pages = 0;
9061}
9062
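/* Size and allocate the backing pages for a context memory type: round
 * the entry count up to the required multiple, clamp it to the min/max
 * range, and allocate one page table set per instance.
 */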
9063static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9064                                   struct bnxt_ctx_mem_type *ctxm, u32 entries,
9065                                   u8 pg_lvl)
9066{
9067        struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9068        int i, rc = 0, n = 1;
9069        u32 mem_size;
9070
9071        if (!ctxm->entry_size || !ctx_pg)
9072                return -EINVAL;
9073        if (ctxm->instance_bmap)
9074                n = hweight32(ctxm->instance_bmap);
9075        if (ctxm->entry_multiple)
9076                entries = roundup(entries, ctxm->entry_multiple);
9077        entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9078        mem_size = entries * ctxm->entry_size;
9079        for (i = 0; i < n && !rc; i++) {
9080                ctx_pg[i].entries = entries;
9081                rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9082                                            ctxm->init_value ? ctxm : NULL);
9083        }
9084        if (!rc)
9085                ctxm->mem_valid = 1;
9086        return rc;
9087}
9088
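/* Send one FUNC_BACKING_STORE_CFG_V2 request per instance of the given
 * context type.  When this is the last type, the final request carries
 * the BS_CFG_ALL_DONE flag.
 */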
9089static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9090                                               struct bnxt_ctx_mem_type *ctxm,
9091                                               bool last)
9092{
9093        struct hwrm_func_backing_store_cfg_v2_input *req;
9094        u32 instance_bmap = ctxm->instance_bmap;
9095        int i, j, rc = 0, n = 1;
9096        __le32 *p;
9097
9098        if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9099                return 0;
9100
9101        if (instance_bmap)
9102                n = hweight32(ctxm->instance_bmap);
9103        else
9104                instance_bmap = 1;
9105
9106        rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9107        if (rc)
9108                return rc;
9109        hwrm_req_hold(bp, req);
9110        req->type = cpu_to_le16(ctxm->type);
9111        req->entry_size = cpu_to_le16(ctxm->entry_size);
9112        if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9113            bnxt_bs_trace_avail(bp, ctxm->type)) {
9114                struct bnxt_bs_trace_info *bs_trace;
9115                u32 enables;
9116
9117                enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9118                req->enables = cpu_to_le32(enables);
9119                bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9120                req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9121        }
9122        req->subtype_valid_cnt = ctxm->split_entry_cnt;
9123        for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9124                p[i] = cpu_to_le32(ctxm->split[i]);
9125        for (i = 0, j = 0; j < n && !rc; i++) {
9126                struct bnxt_ctx_pg_info *ctx_pg;
9127
9128                if (!(instance_bmap & (1 << i)))
9129                        continue;
9130                req->instance = cpu_to_le16(i);
9131                ctx_pg = &ctxm->pg_info[j++];
9132                if (!ctx_pg->entries)
9133                        continue;
9134                req->num_entries = cpu_to_le32(ctx_pg->entries);
9135                bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9136                                      &req->page_size_pbl_level,
9137                                      &req->page_dir);
9138                if (last && j == n)
9139                        req->flags =
9140                                cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9141                rc = hwrm_req_send(bp, req);
9142        }
9143        hwrm_req_drop(bp, req);
9144        return rc;
9145}
9146
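/* Allocate and initialize any available backing store trace regions,
 * then configure every valid context memory type with the V2 firmware
 * call, marking the last type to complete the sequence.
 */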
9147static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
9148{
9149        struct bnxt_ctx_mem_info *ctx = bp->ctx;
9150        struct bnxt_ctx_mem_type *ctxm;
9151        u16 last_type = BNXT_CTX_INV;
9152        int rc = 0;
9153        u16 type;
9154
9155        for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
9156                ctxm = &ctx->ctx_arr[type];
9157                if (!bnxt_bs_trace_avail(bp, type))
9158                        continue;
9159                if (!ctxm->mem_valid) {
9160                        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9161                                                     ctxm->max_entries, 1);
9162                        if (rc) {
9163                                netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9164                                            type);
9165                                continue;
9166                        }
9167                        bnxt_bs_trace_init(bp, ctxm);
9168                }
9169                last_type = type;
9170        }
9171
9172        if (last_type == BNXT_CTX_INV) {
9173                if (!ena)
9174                        return 0;
9175                else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
9176                        last_type = BNXT_CTX_MAX - 1;
9177                else
9178                        last_type = BNXT_CTX_L2_MAX - 1;
9179        }
9180        ctx->ctx_arr[last_type].last = 1;
9181
9182        for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
9183                ctxm = &ctx->ctx_arr[type];
9184
9185                if (!ctxm->mem_valid)
9186                        continue;
9187                rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9188                if (rc)
9189                        return rc;
9190        }
9191        return 0;
9192}
9193
9194/**
9195 * __bnxt_copy_ctx_mem - copy host context memory
9196 * @bp: The driver context
9197 * @ctxm: The pointer to the context memory type
9198 * @buf: The destination buffer or NULL to just obtain the length
9199 * @offset: The buffer offset to copy the data to
9200 * @head: The head offset of context memory to copy from
9201 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9202 *
9203 * This function is called for debugging purposes to dump the host context
9204 * used by the chip.
9205 *
9206 * Return: Length of memory copied
9207 */
9208static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9209                                  struct bnxt_ctx_mem_type *ctxm, void *buf,
9210                                  size_t offset, size_t head, size_t tail)
9211{
9212        struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9213        size_t len = 0, total_len = 0;
9214        int i, n = 1;
9215
9216        if (!ctx_pg)
9217                return 0;
9218
9219        if (ctxm->instance_bmap)
9220                n = hweight32(ctxm->instance_bmap);
9221        for (i = 0; i < n; i++) {
9222                len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9223                                            tail);
9224                offset += len;
9225                total_len += len;
9226        }
9227        return total_len;
9228}
9229
9230size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9231                         void *buf, size_t offset)
9232{
9233        size_t tail = ctxm->max_entries * ctxm->entry_size;
9234
9235        return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9236}
9237
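/* Free one context memory type.  Types flagged BNXT_CTX_MEM_PERSIST are
 * kept across resets unless force is set.
 */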
9238static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9239                                  struct bnxt_ctx_mem_type *ctxm, bool force)
9240{
9241        struct bnxt_ctx_pg_info *ctx_pg;
9242        int i, n = 1;
9243
9244        ctxm->last = 0;
9245
9246        if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9247                return;
9248
9249        ctx_pg = ctxm->pg_info;
9250        if (ctx_pg) {
9251                if (ctxm->instance_bmap)
9252                        n = hweight32(ctxm->instance_bmap);
9253                for (i = 0; i < n; i++)
9254                        bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9255
9256                kfree(ctx_pg);
9257                ctxm->pg_info = NULL;
9258                ctxm->mem_valid = 0;
9259        }
9260        memset(ctxm, 0, sizeof(*ctxm));
9261}
9262
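/* Free all context memory types.  With force set, persistent types are
 * freed as well and the top-level context structure is released.
 */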
9263void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9264{
9265        struct bnxt_ctx_mem_info *ctx = bp->ctx;
9266        u16 type;
9267
9268        if (!ctx)
9269                return;
9270
9271        for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9272                bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9273
9274        ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9275        if (force) {
9276                kfree(ctx);
9277                bp->ctx = NULL;
9278        }
9279}
9280
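/* Query backing store capabilities and allocate the context memory that
 * firmware needs for QPs, SRQs, CQs, VNICs, stats and TQM rings, plus
 * MRAV and TIM entries when RoCE is supported, then pass the layout to
 * firmware.
 */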
9281static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9282{
9283        struct bnxt_ctx_mem_type *ctxm;
9284        struct bnxt_ctx_mem_info *ctx;
9285        u32 l2_qps, qp1_qps, max_qps;
9286        u32 ena, entries_sp, entries;
9287        u32 srqs, max_srqs, min;
9288        u32 num_mr, num_ah;
9289        u32 extra_srqs = 0;
9290        u32 extra_qps = 0;
9291        u32 fast_qpmd_qps;
9292        u8 pg_lvl = 1;
9293        int i, rc;
9294
9295        rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9296        if (rc) {
9297                netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9298                           rc);
9299                return rc;
9300        }
9301        ctx = bp->ctx;
9302        if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9303                return 0;
9304
9305        ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9306        l2_qps = ctxm->qp_l2_entries;
9307        qp1_qps = ctxm->qp_qp1_entries;
9308        fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9309        max_qps = ctxm->max_entries;
9310        ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9311        srqs = ctxm->srq_l2_entries;
9312        max_srqs = ctxm->max_entries;
9313        ena = 0;
9314        if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9315                pg_lvl = 2;
9316                if (BNXT_SW_RES_LMT(bp)) {
9317                        extra_qps = max_qps - l2_qps - qp1_qps;
9318                        extra_srqs = max_srqs - srqs;
9319                } else {
9320                        extra_qps = min_t(u32, 65536,
9321                                          max_qps - l2_qps - qp1_qps);
9322                        /* Allocate extra QPs if the firmware supports
9323                         * the RoCE fast QP destroy feature.
9324                         */
9325                        extra_qps += fast_qpmd_qps;
9326                        extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9327                }
9328                if (fast_qpmd_qps)
9329                        ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9330        }
9331
9332        ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9333        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9334                                     pg_lvl);
9335        if (rc)
9336                return rc;
9337
9338        ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9339        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9340        if (rc)
9341                return rc;
9342
9343        ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9344        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9345                                     extra_qps * 2, pg_lvl);
9346        if (rc)
9347                return rc;
9348
9349        ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9350        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9351        if (rc)
9352                return rc;
9353
9354        ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9355        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9356        if (rc)
9357                return rc;
9358
9359        if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9360                goto skip_rdma;
9361
9362        ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9363        if (BNXT_SW_RES_LMT(bp) &&
9364            ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9365                num_ah = ctxm->mrav_av_entries;
9366                num_mr = ctxm->max_entries - num_ah;
9367        } else {
9368                /* 128K extra entries are needed to accommodate static
9369                 * AH context allocation by the firmware.
9370                 */
9371                num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9372                num_ah = min_t(u32, num_mr, 1024 * 128);
9373                ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9374                if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9375                        ctxm->mrav_av_entries = num_ah;
9376        }
9377
9378        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9379        if (rc)
9380                return rc;
9381        ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9382
9383        ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9384        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9385        if (rc)
9386                return rc;
9387        ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9388
9389skip_rdma:
9390        ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9391        min = ctxm->min_entries;
9392        entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9393                     2 * (extra_qps + qp1_qps) + min;
9394        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9395        if (rc)
9396                return rc;
9397
9398        ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9399        entries = l2_qps + 2 * (extra_qps + qp1_qps);
9400        rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9401        if (rc)
9402                return rc;
9403        for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9404                ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9405        ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9406
9407        if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9408                rc = bnxt_backing_store_cfg_v2(bp, ena);
9409        else
9410                rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9411        if (rc) {
9412                netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9413                           rc);
9414                return rc;
9415        }
9416        ctx->flags |= BNXT_CTX_FLAG_INITED;
9417        return 0;
9418}
9419
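/* Tell firmware where the host crash dump memory lives: page size and
 * level, page table base address, and total dump length.
 */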
9420static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9421{
9422        struct hwrm_dbg_crashdump_medium_cfg_input *req;
9423        u16 page_attr;
9424        int rc;
9425
9426        if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9427                return 0;
9428
9429        rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9430        if (rc)
9431                return rc;
9432
9433        if (BNXT_PAGE_SIZE == 0x2000)
9434                page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9435        else if (BNXT_PAGE_SIZE == 0x10000)
9436                page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9437        else
9438                page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9439        req->pg_size_lvl = cpu_to_le16(page_attr |
9440                                       bp->fw_crash_mem->ring_mem.depth);
9441        req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9442        req->size = cpu_to_le32(bp->fw_crash_len);
9443        req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9444        return hwrm_req_send(bp, req);
9445}
9446
9447static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9448{
9449        if (bp->fw_crash_mem) {
9450                bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9451                kfree(bp->fw_crash_mem);
9452                bp->fw_crash_mem = NULL;
9453        }
9454}
9455
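/* Allocate (or reuse, if already large enough) host memory for the
 * firmware crash dump when firmware can dump to host DDR.
 */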
9456static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9457{
9458        u32 mem_size = 0;
9459        int rc;
9460
9461        if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9462                return 0;
9463
9464        rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9465        if (rc)
9466                return rc;
9467
9468        mem_size = round_up(mem_size, 4);
9469
9470        /* Keep and reuse the existing pages if they can hold the new dump */
9471        if (bp->fw_crash_mem &&
9472            mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9473                goto alloc_done;
9474
9475        if (bp->fw_crash_mem)
9476                bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9477        else
9478                bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
9479                                           GFP_KERNEL);
9480        if (!bp->fw_crash_mem)
9481                return -ENOMEM;
9482
9483        rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9484        if (rc) {
9485                bnxt_free_crash_dump_mem(bp);
9486                return rc;
9487        }
9488
9489alloc_done:
9490        bp->fw_crash_len = mem_size;
9491        return 0;
9492}
9493
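/* Query the function's resource limits.  When all is set, the full
 * min/max counts for rings, VNICs, L2 and stat contexts are cached in
 * bp->hw_resc.
 */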
9494int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9495{
9496        struct hwrm_func_resource_qcaps_output *resp;
9497        struct hwrm_func_resource_qcaps_input *req;
9498        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9499        int rc;
9500
9501        rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9502        if (rc)
9503                return rc;
9504
9505        req->fid = cpu_to_le16(0xffff);
9506        resp = hwrm_req_hold(bp, req);
9507        rc = hwrm_req_send_silent(bp, req);
9508        if (rc)
9509                goto hwrm_func_resc_qcaps_exit;
9510
9511        hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9512        if (!all)
9513                goto hwrm_func_resc_qcaps_exit;
9514
9515        hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9516        hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9517        hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9518        hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9519        hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9520        hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9521        hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9522        hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9523        hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9524        hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9525        hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9526        hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9527        hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9528        hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9529        hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9530        hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9531
9532        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9533                u16 max_msix = le16_to_cpu(resp->max_msix);
9534
9535                hw_resc->max_nqs = max_msix;
9536                hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9537        }
9538
9539        if (BNXT_PF(bp)) {
9540                struct bnxt_pf_info *pf = &bp->pf;
9541
9542                pf->vf_resv_strategy =
9543                        le16_to_cpu(resp->vf_reservation_strategy);
9544                if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9545                        pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9546        }
9547hwrm_func_resc_qcaps_exit:
9548        hwrm_req_drop(bp, req);
9549        return rc;
9550}
9551
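/* Query the PTP configuration from firmware, set up the PHC reference
 * clock registers, and initialize PTP.  Any existing PTP state is torn
 * down on failure.
 */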
9552static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9553{
9554        struct hwrm_port_mac_ptp_qcfg_output *resp;
9555        struct hwrm_port_mac_ptp_qcfg_input *req;
9556        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9557        u8 flags;
9558        int rc;
9559
9560        if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9561                rc = -ENODEV;
9562                goto no_ptp;
9563        }
9564
9565        rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9566        if (rc)
9567                goto no_ptp;
9568
9569        req->port_id = cpu_to_le16(bp->pf.port_id);
9570        resp = hwrm_req_hold(bp, req);
9571        rc = hwrm_req_send(bp, req);
9572        if (rc)
9573                goto exit;
9574
9575        flags = resp->flags;
9576        if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9577            !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9578                rc = -ENODEV;
9579                goto exit;
9580        }
9581        if (!ptp) {
9582                ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9583                if (!ptp) {
9584                        rc = -ENOMEM;
9585                        goto exit;
9586                }
9587                ptp->bp = bp;
9588                bp->ptp_cfg = ptp;
9589        }
9590
9591        if (flags &
9592            (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9593             PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9594                ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9595                ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9596        } else if (BNXT_CHIP_P5(bp)) {
9597                ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9598                ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9599        } else {
9600                rc = -ENODEV;
9601                goto exit;
9602        }
9603        ptp->rtc_configured =
9604                (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9605        rc = bnxt_ptp_init(bp);
9606        if (rc)
9607                netdev_warn(bp->dev, "PTP initialization failed.\n");
9608exit:
9609        hwrm_req_drop(bp, req);
9610        if (!rc)
9611                return 0;
9612
9613no_ptp:
9614        bnxt_ptp_clear(bp);
9615        kfree(ptp);
9616        bp->ptp_cfg = NULL;
9617        return rc;
9618}
9619
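/* Query function capabilities, translate the returned flag words into
 * driver fw_cap and flags bits, and cache the maximum resource counts
 * and (for the PF) port and VF information.
 */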
9620static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9621{
9622        struct hwrm_func_qcaps_output *resp;
9623        struct hwrm_func_qcaps_input *req;
9624        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9625        u32 flags, flags_ext, flags_ext2;
9626        int rc;
9627
9628        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9629        if (rc)
9630                return rc;
9631
9632        req->fid = cpu_to_le16(0xffff);
9633        resp = hwrm_req_hold(bp, req);
9634        rc = hwrm_req_send(bp, req);
9635        if (rc)
9636                goto hwrm_func_qcaps_exit;
9637
9638        flags = le32_to_cpu(resp->flags);
9639        if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9640                bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9641        if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9642                bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9643        if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9644                bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9645        if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9646                bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9647        if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9648                bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9649        if (flags &  FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9650                bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9651        if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9652                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9653        if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9654                bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9655        if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9656                bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9657
9658        flags_ext = le32_to_cpu(resp->flags_ext);
9659        if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9660                bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9661        if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9662                bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9663        if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9664                bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9665        if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9666                bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9667        if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9668                bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9669        if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9670                bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9671        if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9672                bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9673        if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9674                bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9675        if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9676                bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9677
9678        flags_ext2 = le32_to_cpu(resp->flags_ext2);
9679        if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9680                bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9681        if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9682                bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9683        if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9684                bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9685        if (flags_ext2 &
9686            FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9687                bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9688        if (BNXT_PF(bp) &&
9689            (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9690                bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9691
9692        bp->tx_push_thresh = 0;
9693        if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9694            BNXT_FW_MAJ(bp) > 217)
9695                bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9696
9697        hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9698        hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9699        hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9700        hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9701        hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9702        if (!hw_resc->max_hw_ring_grps)
9703                hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9704        hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9705        hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9706        hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9707
9708        hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9709        hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9710        hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9711        hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9712        hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9713        hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9714
9715        if (BNXT_PF(bp)) {
9716                struct bnxt_pf_info *pf = &bp->pf;
9717
9718                pf->fw_fid = le16_to_cpu(resp->fid);
9719                pf->port_id = le16_to_cpu(resp->port_id);
9720                memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9721                pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9722                pf->max_vfs = le16_to_cpu(resp->max_vfs);
9723                bp->flags &= ~BNXT_FLAG_WOL_CAP;
9724                if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9725                        bp->flags |= BNXT_FLAG_WOL_CAP;
9726                if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9727                        bp->fw_cap |= BNXT_FW_CAP_PTP;
9728                } else {
9729                        bnxt_ptp_clear(bp);
9730                        kfree(bp->ptp_cfg);
9731                        bp->ptp_cfg = NULL;
9732                }
9733        } else {
9734#ifdef CONFIG_BNXT_SRIOV
9735                struct bnxt_vf_info *vf = &bp->vf;
9736
9737                vf->fw_fid = le16_to_cpu(resp->fid);
9738                memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9739#endif
9740        }
9741        bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9742
9743hwrm_func_qcaps_exit:
9744        hwrm_req_drop(bp, req);
9745        return rc;
9746}
9747
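/* Query debug capabilities via HWRM_DBG_QCAPS and cache the response
 * flags in bp->fw_dbg_cap.  This is a no-op if the firmware did not
 * advertise BNXT_FW_CAP_DBG_QCAPS in HWRM_FUNC_QCAPS.
 */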
9748static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9749{
9750        struct hwrm_dbg_qcaps_output *resp;
9751        struct hwrm_dbg_qcaps_input *req;
9752        int rc;
9753
9754        bp->fw_dbg_cap = 0;
9755        if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9756                return;
9757
9758        rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9759        if (rc)
9760                return;
9761
9762        req->fid = cpu_to_le16(0xffff);
9763        resp = hwrm_req_hold(bp, req);
9764        rc = hwrm_req_send(bp, req);
9765        if (rc)
9766                goto hwrm_dbg_qcaps_exit;
9767
9768        bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9769
9770hwrm_dbg_qcaps_exit:
9771        hwrm_req_drop(bp, req);
9772}
9773
9774static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9775
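/* Full capability refresh: function capabilities, debug capabilities and
 * queue/port configuration.  On HWRM spec 0x10803 or newer, this also
 * allocates context memory and queries resource capabilities, setting
 * BNXT_FW_CAP_NEW_RM when the new resource manager is available.
 */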
9776int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9777{
9778        int rc;
9779
9780        rc = __bnxt_hwrm_func_qcaps(bp);
9781        if (rc)
9782                return rc;
9783
9784        bnxt_hwrm_dbg_qcaps(bp);
9785
9786        rc = bnxt_hwrm_queue_qportcfg(bp);
9787        if (rc) {
9788                netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9789                return rc;
9790        }
9791        if (bp->hwrm_spec_code >= 0x10803) {
9792                rc = bnxt_alloc_ctx_mem(bp);
9793                if (rc)
9794                        return rc;
9795                rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9796                if (!rc)
9797                        bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9798        }
9799        return 0;
9800}
9801
9802static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9803{
9804        struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9805        struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9806        u32 flags;
9807        int rc;
9808
9809        if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9810                return 0;
9811
9812        rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9813        if (rc)
9814                return rc;
9815
9816        resp = hwrm_req_hold(bp, req);
9817        rc = hwrm_req_send(bp, req);
9818        if (rc)
9819                goto hwrm_cfa_adv_qcaps_exit;
9820
9821        flags = le32_to_cpu(resp->flags);
9822        if (flags &
9823            CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9824                bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9825
9826        if (flags &
9827            CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9828                bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9829
9830        if (flags &
9831            CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9832                bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9833
9834hwrm_cfa_adv_qcaps_exit:
9835        hwrm_req_drop(bp, req);
9836        return rc;
9837}
9838
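/* Allocate bp->fw_health on first use.  Idempotent: returns 0 if the
 * structure has already been allocated.
 */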
9839static int __bnxt_alloc_fw_health(struct bnxt *bp)
9840{
9841        if (bp->fw_health)
9842                return 0;
9843
9844        bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9845        if (!bp->fw_health)
9846                return -ENOMEM;
9847
9848        mutex_init(&bp->fw_health->lock);
9849        return 0;
9850}
9851
9852static int bnxt_alloc_fw_health(struct bnxt *bp)
9853{
9854        int rc;
9855
9856        if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9857            !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9858                return 0;
9859
9860        rc = __bnxt_alloc_fw_health(bp);
9861        if (rc) {
9862                bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9863                bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9864                return rc;
9865        }
9866
9867        return 0;
9868}
9869
9870static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9871{
9872        writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9873                                         BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9874                                         BNXT_FW_HEALTH_WIN_MAP_OFF);
9875}
9876
9877static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9878{
9879        struct bnxt_fw_health *fw_health = bp->fw_health;
9880        u32 reg_type;
9881
9882        if (!fw_health)
9883                return;
9884
9885        reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9886        if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9887                fw_health->status_reliable = false;
9888
9889        reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9890        if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9891                fw_health->resets_reliable = false;
9892}
9893
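/* Try to locate the firmware health status register without relying on
 * HWRM.  First map a GRC window at HCOMM_STATUS_STRUCT_LOC and check the
 * hcomm_status signature; if it is absent, fall back to the fixed BAR0
 * status location on P5+ chips.  On success, the register location is
 * recorded in bp->fw_health and the status is marked reliable.
 */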
9894static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9895{
9896        void __iomem *hs;
9897        u32 status_loc;
9898        u32 reg_type;
9899        u32 sig;
9900
9901        if (bp->fw_health)
9902                bp->fw_health->status_reliable = false;
9903
9904        __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9905        hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9906
9907        sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9908        if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9909                if (!bp->chip_num) {
9910                        __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9911                        bp->chip_num = readl(bp->bar0 +
9912                                             BNXT_FW_HEALTH_WIN_BASE +
9913                                             BNXT_GRC_REG_CHIP_NUM);
9914                }
9915                if (!BNXT_CHIP_P5_PLUS(bp))
9916                        return;
9917
9918                status_loc = BNXT_GRC_REG_STATUS_P5 |
9919                             BNXT_FW_HEALTH_REG_TYPE_BAR0;
9920        } else {
9921                status_loc = readl(hs + offsetof(struct hcomm_status,
9922                                                 fw_status_loc));
9923        }
9924
9925        if (__bnxt_alloc_fw_health(bp)) {
9926                netdev_warn(bp->dev, "no memory for firmware status checks\n");
9927                return;
9928        }
9929
9930        bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9931        reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9932        if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9933                __bnxt_map_fw_health_reg(bp, status_loc);
9934                bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9935                        BNXT_FW_HEALTH_WIN_OFF(status_loc);
9936        }
9937
9938        bp->fw_health->status_reliable = true;
9939}
9940
9941static int bnxt_map_fw_health_regs(struct bnxt *bp)
9942{
9943        struct bnxt_fw_health *fw_health = bp->fw_health;
9944        u32 reg_base = 0xffffffff;
9945        int i;
9946
9947        bp->fw_health->status_reliable = false;
9948        bp->fw_health->resets_reliable = false;
9949        /* Only pre-map the monitoring GRC registers using window 3 */
9950        for (i = 0; i < 4; i++) {
9951                u32 reg = fw_health->regs[i];
9952
9953                if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9954                        continue;
9955                if (reg_base == 0xffffffff)
9956                        reg_base = reg & BNXT_GRC_BASE_MASK;
9957                if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9958                        return -ERANGE;
9959                fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9960        }
9961        bp->fw_health->status_reliable = true;
9962        bp->fw_health->resets_reliable = true;
9963        if (reg_base == 0xffffffff)
9964                return 0;
9965
9966        __bnxt_map_fw_health_reg(bp, reg_base);
9967        return 0;
9968}
9969
9970static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9971{
9972        if (!bp->fw_health)
9973                return;
9974
9975        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9976                bp->fw_health->status_reliable = true;
9977                bp->fw_health->resets_reliable = true;
9978        } else {
9979                bnxt_try_map_fw_health_reg(bp);
9980        }
9981}
9982
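/* Query the error recovery configuration (polling intervals, wait times,
 * health/heartbeat/reset-count register locations and the reset register
 * sequence) via HWRM_ERROR_RECOVERY_QCFG and save it in bp->fw_health.
 * On any failure, BNXT_FW_CAP_ERROR_RECOVERY is cleared so the driver
 * stops relying on firmware-assisted error recovery.
 */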
9983static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9984{
9985        struct bnxt_fw_health *fw_health = bp->fw_health;
9986        struct hwrm_error_recovery_qcfg_output *resp;
9987        struct hwrm_error_recovery_qcfg_input *req;
9988        int rc, i;
9989
9990        if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9991                return 0;
9992
9993        rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9994        if (rc)
9995                return rc;
9996
9997        resp = hwrm_req_hold(bp, req);
9998        rc = hwrm_req_send(bp, req);
9999        if (rc)
10000                goto err_recovery_out;
10001        fw_health->flags = le32_to_cpu(resp->flags);
10002        if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10003            !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10004                rc = -EINVAL;
10005                goto err_recovery_out;
10006        }
10007        fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10008        fw_health->master_func_wait_dsecs =
10009                le32_to_cpu(resp->master_func_wait_period);
10010        fw_health->normal_func_wait_dsecs =
10011                le32_to_cpu(resp->normal_func_wait_period);
10012        fw_health->post_reset_wait_dsecs =
10013                le32_to_cpu(resp->master_func_wait_period_after_reset);
10014        fw_health->post_reset_max_wait_dsecs =
10015                le32_to_cpu(resp->max_bailout_time_after_reset);
10016        fw_health->regs[BNXT_FW_HEALTH_REG] =
10017                le32_to_cpu(resp->fw_health_status_reg);
10018        fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10019                le32_to_cpu(resp->fw_heartbeat_reg);
10020        fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10021                le32_to_cpu(resp->fw_reset_cnt_reg);
10022        fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10023                le32_to_cpu(resp->reset_inprogress_reg);
10024        fw_health->fw_reset_inprog_reg_mask =
10025                le32_to_cpu(resp->reset_inprogress_reg_mask);
10026        fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10027        if (fw_health->fw_reset_seq_cnt >= 16) {
10028                rc = -EINVAL;
10029                goto err_recovery_out;
10030        }
10031        for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10032                fw_health->fw_reset_seq_regs[i] =
10033                        le32_to_cpu(resp->reset_reg[i]);
10034                fw_health->fw_reset_seq_vals[i] =
10035                        le32_to_cpu(resp->reset_reg_val[i]);
10036                fw_health->fw_reset_seq_delay_msec[i] =
10037                        resp->delay_after_reset[i];
10038        }
10039err_recovery_out:
10040        hwrm_req_drop(bp, req);
10041        if (!rc)
10042                rc = bnxt_map_fw_health_regs(bp);
10043        if (rc)
10044                bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10045        return rc;
10046}
10047
10048static int bnxt_hwrm_func_reset(struct bnxt *bp)
10049{
10050        struct hwrm_func_reset_input *req;
10051        int rc;
10052
10053        rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10054        if (rc)
10055                return rc;
10056
10057        req->enables = 0;
10058        hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10059        return hwrm_req_send(bp, req);
10060}
10061
10062static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10063{
10064        struct hwrm_nvm_get_dev_info_output nvm_info;
10065
10066        if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10067                snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10068                         nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10069                         nvm_info.nvm_cfg_ver_upd);
10070}
10071
10072static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10073{
10074        struct hwrm_queue_qportcfg_output *resp;
10075        struct hwrm_queue_qportcfg_input *req;
10076        u8 i, j, *qptr;
10077        bool no_rdma;
10078        int rc = 0;
10079
10080        rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10081        if (rc)
10082                return rc;
10083
10084        resp = hwrm_req_hold(bp, req);
10085        rc = hwrm_req_send(bp, req);
10086        if (rc)
10087                goto qportcfg_exit;
10088
10089        if (!resp->max_configurable_queues) {
10090                rc = -EINVAL;
10091                goto qportcfg_exit;
10092        }
10093        bp->max_tc = resp->max_configurable_queues;
10094        bp->max_lltc = resp->max_configurable_lossless_queues;
10095        if (bp->max_tc > BNXT_MAX_QUEUE)
10096                bp->max_tc = BNXT_MAX_QUEUE;
10097
10098        no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10099        qptr = &resp->queue_id0;
10100        for (i = 0, j = 0; i < bp->max_tc; i++) {
10101                bp->q_info[j].queue_id = *qptr;
10102                bp->q_ids[i] = *qptr++;
10103                bp->q_info[j].queue_profile = *qptr++;
10104                bp->tc_to_qidx[j] = j;
10105                if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10106                    (no_rdma && BNXT_PF(bp)))
10107                        j++;
10108        }
10109        bp->max_q = bp->max_tc;
10110        bp->max_tc = max_t(u8, j, 1);
10111
10112        if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10113                bp->max_tc = 1;
10114
10115        if (bp->max_lltc > bp->max_tc)
10116                bp->max_lltc = bp->max_tc;
10117
10118qportcfg_exit:
10119        hwrm_req_drop(bp, req);
10120        return rc;
10121}
10122
10123static int bnxt_hwrm_poll(struct bnxt *bp)
10124{
10125        struct hwrm_ver_get_input *req;
10126        int rc;
10127
10128        rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10129        if (rc)
10130                return rc;
10131
10132        req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10133        req->hwrm_intf_min = HWRM_VERSION_MINOR;
10134        req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10135
10136        hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10137        rc = hwrm_req_send(bp, req);
10138        return rc;
10139}
10140
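/* Query firmware and HWRM interface versions via HWRM_VER_GET.  The
 * response determines bp->hwrm_spec_code, the version strings reported
 * to userspace, HWRM request size limits and timeouts, the chip number,
 * and several capability bits from dev_caps_cfg (short commands, Kong
 * mailbox channel, 64-bit flow handles, trusted VFs, advanced flow
 * management).
 */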
10141static int bnxt_hwrm_ver_get(struct bnxt *bp)
10142{
10143        struct hwrm_ver_get_output *resp;
10144        struct hwrm_ver_get_input *req;
10145        u16 fw_maj, fw_min, fw_bld, fw_rsv;
10146        u32 dev_caps_cfg, hwrm_ver;
10147        int rc, len, max_tmo_secs;
10148
10149        rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10150        if (rc)
10151                return rc;
10152
10153        hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10154        bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10155        req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10156        req->hwrm_intf_min = HWRM_VERSION_MINOR;
10157        req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10158
10159        resp = hwrm_req_hold(bp, req);
10160        rc = hwrm_req_send(bp, req);
10161        if (rc)
10162                goto hwrm_ver_get_exit;
10163
10164        memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10165
10166        bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10167                             resp->hwrm_intf_min_8b << 8 |
10168                             resp->hwrm_intf_upd_8b;
10169        if (resp->hwrm_intf_maj_8b < 1) {
10170                netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10171                            resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10172                            resp->hwrm_intf_upd_8b);
10173                netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10174        }
10175
10176        hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10177                        HWRM_VERSION_UPDATE;
10178
10179        if (bp->hwrm_spec_code > hwrm_ver)
10180                snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10181                         HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10182                         HWRM_VERSION_UPDATE);
10183        else
10184                snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10185                         resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10186                         resp->hwrm_intf_upd_8b);
10187
10188        fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10189        if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10190                fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10191                fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10192                fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10193                len = FW_VER_STR_LEN;
10194        } else {
10195                fw_maj = resp->hwrm_fw_maj_8b;
10196                fw_min = resp->hwrm_fw_min_8b;
10197                fw_bld = resp->hwrm_fw_bld_8b;
10198                fw_rsv = resp->hwrm_fw_rsvd_8b;
10199                len = BC_HWRM_STR_LEN;
10200        }
10201        bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10202        snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10203                 fw_rsv);
10204
10205        if (strlen(resp->active_pkg_name)) {
10206                int fw_ver_len = strlen(bp->fw_ver_str);
10207
10208                snprintf(bp->fw_ver_str + fw_ver_len,
10209                         FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10210                         resp->active_pkg_name);
10211                bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10212        }
10213
10214        bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10215        if (!bp->hwrm_cmd_timeout)
10216                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10217        bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10218        if (!bp->hwrm_cmd_max_timeout)
10219                bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10220        max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10221#ifdef CONFIG_DETECT_HUNG_TASK
10222        if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10223            max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10224                netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10225                            max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10226        }
10227#endif
10228
10229        if (resp->hwrm_intf_maj_8b >= 1) {
10230                bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10231                bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10232        }
10233        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10234                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10235
10236        bp->chip_num = le16_to_cpu(resp->chip_num);
10237        bp->chip_rev = resp->chip_rev;
10238        if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10239            !resp->chip_metal)
10240                bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10241
10242        dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10243        if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10244            (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10245                bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10246
10247        if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10248                bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10249
10250        if (dev_caps_cfg &
10251            VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10252                bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10253
10254        if (dev_caps_cfg &
10255            VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10256                bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10257
10258        if (dev_caps_cfg &
10259            VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10260                bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10261
10262hwrm_ver_get_exit:
10263        hwrm_req_drop(bp, req);
10264        return rc;
10265}
10266
10267int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10268{
10269        struct hwrm_fw_set_time_input *req;
10270        struct tm tm;
10271        time64_t now = ktime_get_real_seconds();
10272        int rc;
10273
10274        if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10275            bp->hwrm_spec_code < 0x10400)
10276                return -EOPNOTSUPP;
10277
10278        time64_to_tm(now, 0, &tm);
10279        rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10280        if (rc)
10281                return rc;
10282
10283        req->year = cpu_to_le16(1900 + tm.tm_year);
10284        req->month = 1 + tm.tm_mon;
10285        req->day = tm.tm_mday;
10286        req->hour = tm.tm_hour;
10287        req->minute = tm.tm_min;
10288        req->second = tm.tm_sec;
10289        return hwrm_req_send(bp, req);
10290}
10291
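/* Fold a new hardware counter sample into a wider software counter.
 * The hardware only implements the low-order bits covered by @mask;
 * when the masked value goes backwards, the counter must have wrapped,
 * so one full period (mask + 1) is added.  For example, with a 48-bit
 * counter (mask == 0xffffffffffff), a hw sample of 5 following a sw
 * value of 0xfffffffffffe yields sw == 0x1000000000005.
 */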
10292static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10293{
10294        u64 sw_tmp;
10295
10296        hw &= mask;
10297        sw_tmp = (*sw & ~mask) | hw;
10298        if (hw < (*sw & mask))
10299                sw_tmp += mask + 1;
10300        WRITE_ONCE(*sw, sw_tmp);
10301}
10302
10303static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10304                                    int count, bool ignore_zero)
10305{
10306        int i;
10307
10308        for (i = 0; i < count; i++) {
10309                u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10310
10311                if (ignore_zero && !hw)
10312                        continue;
10313
10314                if (masks[i] == -1ULL)
10315                        sw_stats[i] = hw;
10316                else
10317                        bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10318        }
10319}
10320
10321static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10322{
10323        if (!stats->hw_stats)
10324                return;
10325
10326        __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10327                                stats->hw_masks, stats->len / 8, false);
10328}
10329
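/* Accumulate all per-ring and port counters into their 64-bit software
 * mirrors.  Ring 0's counter masks are reused for every completion ring
 * since all rings share the same counter layout.
 */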
10330static void bnxt_accumulate_all_stats(struct bnxt *bp)
10331{
10332        struct bnxt_stats_mem *ring0_stats;
10333        bool ignore_zero = false;
10334        int i;
10335
10336        /* Chip bug.  Counter intermittently becomes 0. */
10337        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10338                ignore_zero = true;
10339
10340        for (i = 0; i < bp->cp_nr_rings; i++) {
10341                struct bnxt_napi *bnapi = bp->bnapi[i];
10342                struct bnxt_cp_ring_info *cpr;
10343                struct bnxt_stats_mem *stats;
10344
10345                cpr = &bnapi->cp_ring;
10346                stats = &cpr->stats;
10347                if (!i)
10348                        ring0_stats = stats;
10349                __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10350                                        ring0_stats->hw_masks,
10351                                        ring0_stats->len / 8, ignore_zero);
10352        }
10353        if (bp->flags & BNXT_FLAG_PORT_STATS) {
10354                struct bnxt_stats_mem *stats = &bp->port_stats;
10355                __le64 *hw_stats = stats->hw_stats;
10356                u64 *sw_stats = stats->sw_stats;
10357                u64 *masks = stats->hw_masks;
10358                int cnt;
10359
10360                cnt = sizeof(struct rx_port_stats) / 8;
10361                __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10362
10363                hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10364                sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10365                masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10366                cnt = sizeof(struct tx_port_stats) / 8;
10367                __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10368        }
10369        if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10370                bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10371                bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10372        }
10373}
10374
10375static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10376{
10377        struct hwrm_port_qstats_input *req;
10378        struct bnxt_pf_info *pf = &bp->pf;
10379        int rc;
10380
10381        if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10382                return 0;
10383
10384        if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10385                return -EOPNOTSUPP;
10386
10387        rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10388        if (rc)
10389                return rc;
10390
10391        req->flags = flags;
10392        req->port_id = cpu_to_le16(pf->port_id);
10393        req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10394                                            BNXT_TX_PORT_STATS_BYTE_OFFSET);
10395        req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10396        return hwrm_req_send(bp, req);
10397}
10398
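/* Request extended port statistics via HWRM_PORT_QSTATS_EXT and record
 * how many 8-byte counters the firmware actually filled in.  When the
 * TX extended stats include the per-priority PFC duration counters,
 * also query the priority-to-CoS-queue mapping so that per-priority
 * counters can be attributed to the right hardware queues.
 */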
10399static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10400{
10401        struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10402        struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10403        struct hwrm_port_qstats_ext_output *resp_qs;
10404        struct hwrm_port_qstats_ext_input *req_qs;
10405        struct bnxt_pf_info *pf = &bp->pf;
10406        u32 tx_stat_size;
10407        int rc;
10408
10409        if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10410                return 0;
10411
10412        if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10413                return -EOPNOTSUPP;
10414
10415        rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10416        if (rc)
10417                return rc;
10418
10419        req_qs->flags = flags;
10420        req_qs->port_id = cpu_to_le16(pf->port_id);
10421        req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10422        req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10423        tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10424                       sizeof(struct tx_port_stats_ext) : 0;
10425        req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10426        req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10427        resp_qs = hwrm_req_hold(bp, req_qs);
10428        rc = hwrm_req_send(bp, req_qs);
10429        if (!rc) {
10430                bp->fw_rx_stats_ext_size =
10431                        le16_to_cpu(resp_qs->rx_stat_size) / 8;
10432                if (BNXT_FW_MAJ(bp) < 220 &&
10433                    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10434                        bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10435
10436                bp->fw_tx_stats_ext_size = tx_stat_size ?
10437                        le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10438        } else {
10439                bp->fw_rx_stats_ext_size = 0;
10440                bp->fw_tx_stats_ext_size = 0;
10441        }
10442        hwrm_req_drop(bp, req_qs);
10443
10444        if (flags)
10445                return rc;
10446
10447        if (bp->fw_tx_stats_ext_size <=
10448            offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10449                bp->pri2cos_valid = 0;
10450                return rc;
10451        }
10452
10453        rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10454        if (rc)
10455                return rc;
10456
10457        req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10458
10459        resp_qc = hwrm_req_hold(bp, req_qc);
10460        rc = hwrm_req_send(bp, req_qc);
10461        if (!rc) {
10462                u8 *pri2cos;
10463                int i, j;
10464
10465                pri2cos = &resp_qc->pri0_cos_queue_id;
10466                for (i = 0; i < 8; i++) {
10467                        u8 queue_id = pri2cos[i];
10468                        u8 queue_idx;
10469
10470                        /* Per port queue IDs start from 0, 10, 20, etc */
10471                        queue_idx = queue_id % 10;
10472                        if (queue_idx > BNXT_MAX_QUEUE) {
10473                                bp->pri2cos_valid = false;
10474                                hwrm_req_drop(bp, req_qc);
10475                                return rc;
10476                        }
10477                        for (j = 0; j < bp->max_q; j++) {
10478                                if (bp->q_ids[j] == queue_id)
10479                                        bp->pri2cos_idx[i] = queue_idx;
10480                        }
10481                }
10482                bp->pri2cos_valid = true;
10483        }
10484        hwrm_req_drop(bp, req_qc);
10485
10486        return rc;
10487}
10488
10489static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10490{
10491        bnxt_hwrm_tunnel_dst_port_free(bp,
10492                TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10493        bnxt_hwrm_tunnel_dst_port_free(bp,
10494                TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10495}
10496
10497static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10498{
10499        int rc, i;
10500        u32 tpa_flags = 0;
10501
10502        if (set_tpa)
10503                tpa_flags = bp->flags & BNXT_FLAG_TPA;
10504        else if (BNXT_NO_FW_ACCESS(bp))
10505                return 0;
10506        for (i = 0; i < bp->nr_vnics; i++) {
10507                rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10508                if (rc) {
10509                        netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10510                                   i, rc);
10511                        return rc;
10512                }
10513        }
10514        return 0;
10515}
10516
10517static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10518{
10519        int i;
10520
10521        for (i = 0; i < bp->nr_vnics; i++)
10522                bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10523}
10524
10525static void bnxt_clear_vnic(struct bnxt *bp)
10526{
10527        if (!bp->vnic_info)
10528                return;
10529
10530        bnxt_hwrm_clear_vnic_filter(bp);
10531        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10532                /* clear all RSS settings before freeing the vnic ctx */
10533                bnxt_hwrm_clear_vnic_rss(bp);
10534                bnxt_hwrm_vnic_ctx_free(bp);
10535        }
10536        /* before freeing the vnic, undo the vnic tpa settings */
10537        if (bp->flags & BNXT_FLAG_TPA)
10538                bnxt_set_tpa(bp, false);
10539        bnxt_hwrm_vnic_free(bp);
10540        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10541                bnxt_hwrm_vnic_ctx_free(bp);
10542}
10543
10544static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10545                                    bool irq_re_init)
10546{
10547        bnxt_clear_vnic(bp);
10548        bnxt_hwrm_ring_free(bp, close_path);
10549        bnxt_hwrm_ring_grp_free(bp);
10550        if (irq_re_init) {
10551                bnxt_hwrm_stat_ctx_free(bp);
10552                bnxt_hwrm_free_tunnel_ports(bp);
10553        }
10554}
10555
10556static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10557{
10558        struct hwrm_func_cfg_input *req;
10559        u8 evb_mode;
10560        int rc;
10561
10562        if (br_mode == BRIDGE_MODE_VEB)
10563                evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10564        else if (br_mode == BRIDGE_MODE_VEPA)
10565                evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10566        else
10567                return -EINVAL;
10568
10569        rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10570        if (rc)
10571                return rc;
10572
10573        req->fid = cpu_to_le16(0xffff);
10574        req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10575        req->evb_mode = evb_mode;
10576        return hwrm_req_send(bp, req);
10577}
10578
10579static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10580{
10581        struct hwrm_func_cfg_input *req;
10582        int rc;
10583
10584        if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10585                return 0;
10586
10587        rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10588        if (rc)
10589                return rc;
10590
10591        req->fid = cpu_to_le16(0xffff);
10592        req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10593        req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10594        if (size == 128)
10595                req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10596
10597        return hwrm_req_send(bp, req);
10598}
10599
10600static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10601{
10602        int rc;
10603
10604        if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10605                goto skip_rss_ctx;
10606
10607        /* allocate context for vnic */
10608        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10609        if (rc) {
10610                netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10611                           vnic->vnic_id, rc);
10612                goto vnic_setup_err;
10613        }
10614        bp->rsscos_nr_ctxs++;
10615
10616        if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10617                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10618                if (rc) {
10619                        netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10620                                   vnic->vnic_id, rc);
10621                        goto vnic_setup_err;
10622                }
10623                bp->rsscos_nr_ctxs++;
10624        }
10625
10626skip_rss_ctx:
10627        /* configure default vnic, ring grp */
10628        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10629        if (rc) {
10630                netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10631                           vnic->vnic_id, rc);
10632                goto vnic_setup_err;
10633        }
10634
10635        /* Enable RSS hashing on vnic */
10636        rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10637        if (rc) {
10638                netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10639                           vnic->vnic_id, rc);
10640                goto vnic_setup_err;
10641        }
10642
10643        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10644                rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10645                if (rc) {
10646                        netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10647                                   vnic->vnic_id, rc);
10648                }
10649        }
10650
10651vnic_setup_err:
10652        return rc;
10653}
10654
10655int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10656                          u8 valid)
10657{
10658        struct hwrm_vnic_update_input *req;
10659        int rc;
10660
10661        rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10662        if (rc)
10663                return rc;
10664
10665        req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10666
10667        if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10668                req->mru = cpu_to_le16(vnic->mru);
10669
10670        req->enables = cpu_to_le32(valid);
10671
10672        return hwrm_req_send(bp, req);
10673}
10674
10675int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10676{
10677        int rc;
10678
10679        rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10680        if (rc) {
10681                netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10682                           vnic->vnic_id, rc);
10683                return rc;
10684        }
10685        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10686        if (rc)
10687                netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10688                           vnic->vnic_id, rc);
10689        return rc;
10690}
10691
10692int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10693{
10694        int rc, i, nr_ctxs;
10695
10696        nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10697        for (i = 0; i < nr_ctxs; i++) {
10698                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10699                if (rc) {
10700                        netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10701                                   vnic->vnic_id, i, rc);
10702                        break;
10703                }
10704                bp->rsscos_nr_ctxs++;
10705        }
10706        if (i < nr_ctxs)
10707                return -ENOMEM;
10708
10709        rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10710        if (rc)
10711                return rc;
10712
10713        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10714                rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10715                if (rc) {
10716                        netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10717                                   vnic->vnic_id, rc);
10718                }
10719        }
10720        return rc;
10721}
10722
10723static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10724{
10725        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10726                return __bnxt_setup_vnic_p5(bp, vnic);
10727        else
10728                return __bnxt_setup_vnic(bp, vnic);
10729}
10730
10731static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10732                                     struct bnxt_vnic_info *vnic,
10733                                     u16 start_rx_ring_idx, int rx_rings)
10734{
10735        int rc;
10736
10737        rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10738        if (rc) {
10739                netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10740                           vnic->vnic_id, rc);
10741                return rc;
10742        }
10743        return bnxt_setup_vnic(bp, vnic);
10744}
10745
10746static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10747{
10748        struct bnxt_vnic_info *vnic;
10749        int i, rc = 0;
10750
10751        if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10752                vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10753                return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10754        }
10755
10756        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10757                return 0;
10758
10759        for (i = 0; i < bp->rx_nr_rings; i++) {
10760                u16 vnic_id = i + 1;
10761                u16 ring_id = i;
10762
10763                if (vnic_id >= bp->nr_vnics)
10764                        break;
10765
10766                vnic = &bp->vnic_info[vnic_id];
10767                vnic->flags |= BNXT_VNIC_RFS_FLAG;
10768                if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10769                        vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10770                if (bnxt_alloc_and_setup_vnic(bp, vnic, ring_id, 1))
10771                        break;
10772        }
10773        return rc;
10774}
10775
10776void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10777                          bool all)
10778{
10779        struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10780        struct bnxt_filter_base *usr_fltr, *tmp;
10781        struct bnxt_ntuple_filter *ntp_fltr;
10782        int i;
10783
10784        if (netif_running(bp->dev)) {
10785                bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10786                for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10787                        if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10788                                bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10789                }
10790        }
10791        if (!all)
10792                return;
10793
10794        list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10795                if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10796                    usr_fltr->fw_vnic_id == rss_ctx->index) {
10797                        ntp_fltr = container_of(usr_fltr,
10798                                                struct bnxt_ntuple_filter,
10799                                                base);
10800                        bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10801                        bnxt_del_ntp_filter(bp, ntp_fltr);
10802                        bnxt_del_one_usr_fltr(bp, usr_fltr);
10803                }
10804        }
10805
10806        if (vnic->rss_table)
10807                dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10808                                  vnic->rss_table,
10809                                  vnic->rss_table_dma_addr);
10810        bp->num_rss_ctx--;
10811}
10812
10813static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10814                                  int rxr_id)
10815{
10816        u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10817        int i, vnic_rx;
10818
10819        /* The ntuple VNIC always has all the rx rings.  Any ring id
10820         * change must be applied to it because a future filter may use it.
10821         */
10822        if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10823                return true;
10824
10825        for (i = 0; i < tbl_size; i++) {
10826                if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10827                        vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10828                else
10829                        vnic_rx = bp->rss_indir_tbl[i];
10830
10831                if (rxr_id == vnic_rx)
10832                        return true;
10833        }
10834
10835        return false;
10836}
10837
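/* Update the MRU of a P5+ VNIC, but only if the VNIC actually uses rx
 * ring @rxr_id.  When a non-zero @mru is set, the RSS table is
 * re-programmed before the MRU update is sent to the firmware.
 */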
10838static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10839                                u16 mru, int rxr_id)
10840{
10841        int rc;
10842
10843        if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10844                return 0;
10845
10846        if (mru) {
10847                rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10848                if (rc) {
10849                        netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10850                                   vnic->vnic_id, rc);
10851                        return rc;
10852                }
10853        }
10854        vnic->mru = mru;
10855        bnxt_hwrm_vnic_update(bp, vnic,
10856                              VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10857
10858        return 0;
10859}
10860
10861static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10862{
10863        struct ethtool_rxfh_context *ctx;
10864        unsigned long context;
10865        int rc;
10866
10867        xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10868                struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10869                struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10870
10871                rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10872                if (rc)
10873                        return rc;
10874        }
10875
10876        return 0;
10877}
10878
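/* Recreate the hardware VNIC behind every user-created RSS context.
 * A context whose VNIC cannot be fully restored is deleted and reported
 * as lost to ethtool.
 */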
10879static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10880{
10881        bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10882        struct ethtool_rxfh_context *ctx;
10883        unsigned long context;
10884
10885        xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10886                struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10887                struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10888
10889                if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10890                    bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10891                    __bnxt_setup_vnic_p5(bp, vnic)) {
10892                        netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10893                                   rss_ctx->index);
10894                        bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10895                        ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10896                }
10897        }
10898}
10899
10900static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10901{
10902        struct ethtool_rxfh_context *ctx;
10903        unsigned long context;
10904
10905        xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10906                struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10907
10908                bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10909        }
10910}
10911
10912/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10913static bool bnxt_promisc_ok(struct bnxt *bp)
10914{
10915#ifdef CONFIG_BNXT_SRIOV
10916        if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10917                return false;
10918#endif
10919        return true;
10920}
10921
10922static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10923{
10924        struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10925        int rc;
10926
10927        rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10928        if (rc) {
10929                netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10930                           rc);
10931                return rc;
10932        }
10933
10934        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10935        if (rc) {
10936                netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10937                           rc);
10938                return rc;
10939        }
10940        return rc;
10941}
10942
10943static int bnxt_cfg_rx_mode(struct bnxt *);
10944static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10945
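/* Bring up the device's hardware state: allocate stat contexts (when
 * IRQs are being re-initialized), rings, ring groups and the default
 * VNIC; program the unicast MAC filter and rx mode; set interrupt
 * coalescing; and create the RFS VNICs and TPA configuration as needed.
 * On failure, all firmware resources allocated so far are freed.
 */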
10946static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10947{
10948        struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10949        int rc = 0;
10950        unsigned int rx_nr_rings = bp->rx_nr_rings;
10951
10952        if (irq_re_init) {
10953                rc = bnxt_hwrm_stat_ctx_alloc(bp);
10954                if (rc) {
10955                        netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10956                                   rc);
10957                        goto err_out;
10958                }
10959        }
10960
10961        rc = bnxt_hwrm_ring_alloc(bp);
10962        if (rc) {
10963                netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10964                goto err_out;
10965        }
10966
10967        rc = bnxt_hwrm_ring_grp_alloc(bp);
10968        if (rc) {
10969                netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10970                goto err_out;
10971        }
10972
10973        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10974                rx_nr_rings--;
10975
10976        /* default vnic 0 */
10977        rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10978        if (rc) {
10979                netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10980                goto err_out;
10981        }
10982
10983        if (BNXT_VF(bp))
10984                bnxt_hwrm_func_qcfg(bp);
10985
10986        rc = bnxt_setup_vnic(bp, vnic);
10987        if (rc)
10988                goto err_out;
10989        if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
10990                bnxt_hwrm_update_rss_hash_cfg(bp);
10991
10992        if (bp->flags & BNXT_FLAG_RFS) {
10993                rc = bnxt_alloc_rfs_vnics(bp);
10994                if (rc)
10995                        goto err_out;
10996        }
10997
10998        if (bp->flags & BNXT_FLAG_TPA) {
10999                rc = bnxt_set_tpa(bp, true);
11000                if (rc)
11001                        goto err_out;
11002        }
11003
11004        if (BNXT_VF(bp))
11005                bnxt_update_vf_mac(bp);
11006
11007        /* Filter for default vnic 0 */
11008        rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11009        if (rc) {
11010                if (BNXT_VF(bp) && rc == -ENODEV)
11011                        netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11012                else
11013                        netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11014                goto err_out;
11015        }
11016        vnic->uc_filter_count = 1;
11017
11018        vnic->rx_mask = 0;
11019        if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11020                goto skip_rx_mask;
11021
11022        if (bp->dev->flags & IFF_BROADCAST)
11023                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11024
11025        if (bp->dev->flags & IFF_PROMISC)
11026                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11027
11028        if (bp->dev->flags & IFF_ALLMULTI) {
11029                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11030                vnic->mc_list_count = 0;
11031        } else if (bp->dev->flags & IFF_MULTICAST) {
11032                u32 mask = 0;
11033
11034                bnxt_mc_list_updated(bp, &mask);
11035                vnic->rx_mask |= mask;
11036        }
11037
11038        rc = bnxt_cfg_rx_mode(bp);
11039        if (rc)
11040                goto err_out;
11041
11042skip_rx_mask:
11043        rc = bnxt_hwrm_set_coal(bp);
11044        if (rc)
11045                netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11046                                rc);
11047
11048        if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11049                rc = bnxt_setup_nitroa0_vnic(bp);
11050                if (rc)
11051                        netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11052                                   rc);
11053        }
11054
11055        if (BNXT_VF(bp)) {
11056                bnxt_hwrm_func_qcfg(bp);
11057                netdev_update_features(bp->dev);
11058        }
11059
11060        return 0;
11061
11062err_out:
11063        bnxt_hwrm_resource_free(bp, 0, true);
11064
11065        return rc;
11066}
11067
11068static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11069{
11070        bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11071        return 0;
11072}
11073
11074static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11075{
11076        bnxt_init_cp_rings(bp);
11077        bnxt_init_rx_rings(bp);
11078        bnxt_init_tx_rings(bp);
11079        bnxt_init_ring_grps(bp, irq_re_init);
11080        bnxt_init_vnics(bp);
11081
11082        return bnxt_init_chip(bp, irq_re_init);
11083}
11084
11085static int bnxt_set_real_num_queues(struct bnxt *bp)
11086{
11087        int rc;
11088        struct net_device *dev = bp->dev;
11089
11090        rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11091                                          bp->tx_nr_rings_xdp);
11092        if (rc)
11093                return rc;
11094
11095        rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11096        if (rc)
11097                return rc;
11098
11099#ifdef CONFIG_RFS_ACCEL
11100        if (bp->flags & BNXT_FLAG_RFS)
11101                dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11102#endif
11103
11104        return rc;
11105}
11106
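/* Trim the requested rx and tx ring counts to fit within @max
 * completion rings.  With shared completion rings, rx and tx are each
 * capped at @max independently; otherwise, rings are removed from the
 * larger of the two counts until rx + tx <= max, keeping at least one
 * ring of each type.
 */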
11107static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11108                             bool shared)
11109{
11110        int _rx = *rx, _tx = *tx;
11111
11112        if (shared) {
11113                *rx = min_t(int, _rx, max);
11114                *tx = min_t(int, _tx, max);
11115        } else {
11116                if (max < 2)
11117                        return -ENOMEM;
11118
11119                while (_rx + _tx > max) {
11120                        if (_rx > _tx && _rx > 1)
11121                                _rx--;
11122                        else if (_tx > 1)
11123                                _tx--;
11124                }
11125                *rx = _rx;
11126                *tx = _tx;
11127        }
11128        return 0;
11129}
11130
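/* Convert a tx ring count to the number of completion rings required.
 * Non-XDP tx rings are grouped @tx_sets per completion ring (the tx
 * rings of all traffic classes at the same index share one completion
 * ring), while each XDP tx ring gets its own.
 */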
11131static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11132{
11133        return (tx - tx_xdp) / tx_sets + tx_xdp;
11134}
11135
11136int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11137{
11138        int tcs = bp->num_tc;
11139
11140        if (!tcs)
11141                tcs = 1;
11142        return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11143}
11144
11145static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11146{
11147        int tcs = bp->num_tc;
11148
11149        return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11150               bp->tx_nr_rings_xdp;
11151}
11152
11153static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11154                           bool sh)
11155{
11156        int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11157
11158        if (tx_cp != *tx) {
11159                int tx_saved = tx_cp, rc;
11160
11161                rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11162                if (rc)
11163                        return rc;
11164                if (tx_cp != tx_saved)
11165                        *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11166                return 0;
11167        }
11168        return __bnxt_trim_rings(bp, rx, tx, max, sh);
11169}
11170
11171static void bnxt_setup_msix(struct bnxt *bp)
11172{
11173        const int len = sizeof(bp->irq_tbl[0].name);
11174        struct net_device *dev = bp->dev;
11175        int tcs, i;
11176
11177        tcs = bp->num_tc;
11178        if (tcs) {
11179                int i, off, count;
11180
11181                for (i = 0; i < tcs; i++) {
11182                        count = bp->tx_nr_rings_per_tc;
11183                        off = BNXT_TC_TO_RING_BASE(bp, i);
11184                        netdev_set_tc_queue(dev, i, count, off);
11185                }
11186        }
11187
11188        for (i = 0; i < bp->cp_nr_rings; i++) {
11189                int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11190                char *attr;
11191
11192                if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11193                        attr = "TxRx";
11194                else if (i < bp->rx_nr_rings)
11195                        attr = "rx";
11196                else
11197                        attr = "tx";
11198
11199                snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11200                         attr, i);
11201                bp->irq_tbl[map_idx].handler = bnxt_msix;
11202        }
11203}
11204
11205static int bnxt_init_int_mode(struct bnxt *bp);
11206
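/* Grow or shrink the number of allocated MSI-X vectors to @total using
 * dynamic MSI-X allocation.  Returns the resulting bp->total_irqs,
 * which may differ from @total if an allocation fails partway.
 */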
11207static int bnxt_change_msix(struct bnxt *bp, int total)
11208{
11209        struct msi_map map;
11210        int i;
11211
11212        /* add MSIX to the end if needed */
11213        for (i = bp->total_irqs; i < total; i++) {
11214                map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11215                if (map.index < 0)
11216                        return bp->total_irqs;
11217                bp->irq_tbl[i].vector = map.virq;
11218                bp->total_irqs++;
11219        }
11220
11221        /* trim MSIX from the end if needed */
11222        for (i = bp->total_irqs; i > total; i--) {
11223                map.index = i - 1;
11224                map.virq = bp->irq_tbl[i - 1].vector;
11225                pci_msix_free_irq(bp->pdev, map);
11226                bp->total_irqs--;
11227        }
11228        return bp->total_irqs;
11229}
11230
11231static int bnxt_setup_int_mode(struct bnxt *bp)
11232{
11233        int rc;
11234
11235        if (!bp->irq_tbl) {
11236                rc = bnxt_init_int_mode(bp);
11237                if (rc || !bp->irq_tbl)
11238                        return rc ?: -ENODEV;
11239        }
11240
11241        bnxt_setup_msix(bp);
11242
11243        rc = bnxt_set_real_num_queues(bp);
11244        return rc;
11245}
11246
11247static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11248{
11249        return bp->hw_resc.max_rsscos_ctxs;
11250}
11251
11252static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11253{
11254        return bp->hw_resc.max_vnics;
11255}
11256
11257unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11258{
11259        return bp->hw_resc.max_stat_ctxs;
11260}
11261
11262unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11263{
11264        return bp->hw_resc.max_cp_rings;
11265}
11266
11267static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11268{
11269        unsigned int cp = bp->hw_resc.max_cp_rings;
11270
11271        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11272                cp -= bnxt_get_ulp_msix_num(bp);
11273
11274        return cp;
11275}
11276
11277static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11278{
11279        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11280
11281        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11282                return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11283
11284        return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11285}
11286
11287static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11288{
11289        bp->hw_resc.max_irqs = max_irqs;
11290}
11291
11292unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11293{
11294        unsigned int cp;
11295
11296        cp = bnxt_get_max_func_cp_rings_for_en(bp);
11297        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11298                return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11299        else
11300                return cp - bp->cp_nr_rings;
11301}
11302
11303unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11304{
11305        return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11306}
11307
11308static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11309{
11310        int max_irq = bnxt_get_max_func_irqs(bp);
11311        int total_req = bp->cp_nr_rings + num;
11312
11313        if (max_irq < total_req) {
11314                num = max_irq - bp->cp_nr_rings;
11315                if (num <= 0)
11316                        return 0;
11317        }
11318        return num;
11319}
11320
11321static int bnxt_get_num_msix(struct bnxt *bp)
11322{
11323        if (!BNXT_NEW_RM(bp))
11324                return bnxt_get_max_func_irqs(bp);
11325
11326        return bnxt_nq_rings_in_use(bp);
11327}
11328
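/* Allocate MSI-X vectors and build the IRQ table.  The RX/TX ring
 * counts are then trimmed to fit the vectors actually granted, minus
 * the vectors set aside for the ULP driver.
 */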
11329static int bnxt_init_int_mode(struct bnxt *bp)
11330{
11331        int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11332
11333        total_vecs = bnxt_get_num_msix(bp);
11334        max = bnxt_get_max_func_irqs(bp);
11335        if (total_vecs > max)
11336                total_vecs = max;
11337
11338        if (!total_vecs)
11339                return 0;
11340
11341        if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11342                min = 2;
11343
11344        total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11345                                           PCI_IRQ_MSIX);
11346        ulp_msix = bnxt_get_ulp_msix_num(bp);
11347        if (total_vecs < 0 || total_vecs < ulp_msix) {
11348                rc = -ENODEV;
11349                goto msix_setup_exit;
11350        }
11351
11352        tbl_size = total_vecs;
11353        if (pci_msix_can_alloc_dyn(bp->pdev))
11354                tbl_size = max;
11355        bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11356        if (bp->irq_tbl) {
11357                for (i = 0; i < total_vecs; i++)
11358                        bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11359
11360                bp->total_irqs = total_vecs;
11361                /* Trim rings based upon num of vectors allocated */
11362                rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11363                                     total_vecs - ulp_msix, min == 1);
11364                if (rc)
11365                        goto msix_setup_exit;
11366
11367                tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11368                bp->cp_nr_rings = (min == 1) ?
11369                                  max_t(int, tx_cp, bp->rx_nr_rings) :
11370                                  tx_cp + bp->rx_nr_rings;
11371
11372        } else {
11373                rc = -ENOMEM;
11374                goto msix_setup_exit;
11375        }
11376        return 0;
11377
11378msix_setup_exit:
11379        netdev_err(bp->dev, "bnxt_init_int_mode err: %d\n", rc);
11380        kfree(bp->irq_tbl);
11381        bp->irq_tbl = NULL;
11382        pci_free_irq_vectors(bp->pdev);
11383        return rc;
11384}
11385
11386static void bnxt_clear_int_mode(struct bnxt *bp)
11387{
11388        pci_free_irq_vectors(bp->pdev);
11389
11390        kfree(bp->irq_tbl);
11391        bp->irq_tbl = NULL;
11392}
11393
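/* Reserve rings and IRQs with the firmware if the current reservations
 * are stale.  Without dynamic MSI-X support, all vectors must be freed
 * and reallocated around the reservation; with it, bnxt_change_msix()
 * adjusts the vector count in place.
 */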
11394int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11395{
11396        bool irq_cleared = false;
11397        bool irq_change = false;
11398        int tcs = bp->num_tc;
11399        int irqs_required;
11400        int rc;
11401
11402        if (!bnxt_need_reserve_rings(bp))
11403                return 0;
11404
11405        if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11406                int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11407
11408                if (ulp_msix > bp->ulp_num_msix_want)
11409                        ulp_msix = bp->ulp_num_msix_want;
11410                irqs_required = ulp_msix + bp->cp_nr_rings;
11411        } else {
11412                irqs_required = bnxt_get_num_msix(bp);
11413        }
11414
11415        if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11416                irq_change = true;
11417                if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11418                        bnxt_ulp_irq_stop(bp);
11419                        bnxt_clear_int_mode(bp);
11420                        irq_cleared = true;
11421                }
11422        }
11423        rc = __bnxt_reserve_rings(bp);
11424        if (irq_cleared) {
11425                if (!rc)
11426                        rc = bnxt_init_int_mode(bp);
11427                bnxt_ulp_irq_restart(bp, rc);
11428        } else if (irq_change && !rc) {
11429                if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11430                        rc = -ENOSPC;
11431        }
11432        if (rc) {
11433                netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11434                return rc;
11435        }
11436        if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11437                    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11438                netdev_err(bp->dev, "tx ring reservation failure\n");
11439                netdev_reset_tc(bp->dev);
11440                bp->num_tc = 0;
11441                if (bp->tx_nr_rings_xdp)
11442                        bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11443                else
11444                        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11445                return -ENOMEM;
11446        }
11447        return 0;
11448}
11449
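/* Quiesce the TX rings of one NAPI instance for a queue restart.  Each
 * ring is marked closing and its netdev TX queue is stopped.  In TPH
 * mode the TX and completion rings are also freed in the firmware so
 * that bnxt_tx_queue_start() can reallocate them.
 */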
11450static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11451{
11452        struct bnxt_tx_ring_info *txr;
11453        struct netdev_queue *txq;
11454        struct bnxt_napi *bnapi;
11455        int i;
11456
11457        bnapi = bp->bnapi[idx];
11458        bnxt_for_each_napi_tx(i, bnapi, txr) {
11459                WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11460                synchronize_net();
11461
11462                if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11463                        txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11464                        if (txq) {
11465                                __netif_tx_lock_bh(txq);
11466                                netif_tx_stop_queue(txq);
11467                                __netif_tx_unlock_bh(txq);
11468                        }
11469                }
11470
11471                if (!bp->tph_mode)
11472                        continue;
11473
11474                bnxt_hwrm_tx_ring_free(bp, txr, true);
11475                bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11476                bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11477                bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11478        }
11479}
11480
11481static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11482{
11483        struct bnxt_tx_ring_info *txr;
11484        struct netdev_queue *txq;
11485        struct bnxt_napi *bnapi;
11486        int rc, i;
11487
11488        bnapi = bp->bnapi[idx];
11489        /* All rings have been reserved and previously allocated.
11490         * Reallocating with the same parameters should never fail.
11491         */
11492        bnxt_for_each_napi_tx(i, bnapi, txr) {
11493                if (!bp->tph_mode)
11494                        goto start_tx;
11495
11496                rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11497                if (rc)
11498                        return rc;
11499
11500                rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11501                if (rc)
11502                        return rc;
11503
11504                txr->tx_prod = 0;
11505                txr->tx_cons = 0;
11506                txr->tx_hw_cons = 0;
11507start_tx:
11508                WRITE_ONCE(txr->dev_state, 0);
11509                synchronize_net();
11510
11511                if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11512                        continue;
11513
11514                txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11515                if (txq)
11516                        netif_tx_start_queue(txq);
11517        }
11518
11519        return 0;
11520}
11521
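/* IRQ affinity notifier, used only in TPH mode.  Look up the steering
 * tag for the first CPU in the new mask, program it into this vector's
 * ST table entry, and restart the RX queue so its rings are set up
 * again with the new tag.
 */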
11522static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11523                                     const cpumask_t *mask)
11524{
11525        struct bnxt_irq *irq;
11526        u16 tag;
11527        int err;
11528
11529        irq = container_of(notify, struct bnxt_irq, affinity_notify);
11530
11531        if (!irq->bp->tph_mode)
11532                return;
11533
11534        cpumask_copy(irq->cpu_mask, mask);
11535
11536        if (irq->ring_nr >= irq->bp->rx_nr_rings)
11537                return;
11538
11539        if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11540                                cpumask_first(irq->cpu_mask), &tag))
11541                return;
11542
11543        if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11544                return;
11545
11546        netdev_lock(irq->bp->dev);
11547        if (netif_running(irq->bp->dev)) {
11548                err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11549                if (err)
11550                        netdev_err(irq->bp->dev,
11551                                   "RX queue restart failed: err=%d\n", err);
11552        }
11553        netdev_unlock(irq->bp->dev);
11554}
11555
11556static void bnxt_irq_affinity_release(struct kref *ref)
11557{
11558        struct irq_affinity_notify *notify =
11559                container_of(ref, struct irq_affinity_notify, kref);
11560        struct bnxt_irq *irq;
11561
11562        irq = container_of(notify, struct bnxt_irq, affinity_notify);
11563
11564        if (!irq->bp->tph_mode)
11565                return;
11566
11567        if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11568                netdev_err(irq->bp->dev,
11569                           "Setting ST=0 for MSIX entry %d failed\n",
11570                           irq->msix_nr);
11571                return;
11572        }
11573}
11574
11575static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11576{
11577        irq_set_affinity_notifier(irq->vector, NULL);
11578}
11579
11580static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11581{
11582        struct irq_affinity_notify *notify;
11583
11584        irq->bp = bp;
11585
11586        /* Nothing to do if TPH is not enabled */
11587        if (!bp->tph_mode)
11588                return;
11589
11590        /* Register IRQ affinity notifier */
11591        notify = &irq->affinity_notify;
11592        notify->irq = irq->vector;
11593        notify->notify = bnxt_irq_affinity_notify;
11594        notify->release = bnxt_irq_affinity_release;
11595
11596        irq_set_affinity_notifier(irq->vector, notify);
11597}
11598
11599static void bnxt_free_irq(struct bnxt *bp)
11600{
11601        struct bnxt_irq *irq;
11602        int i;
11603
11604#ifdef CONFIG_RFS_ACCEL
11605        free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11606        bp->dev->rx_cpu_rmap = NULL;
11607#endif
11608        if (!bp->irq_tbl || !bp->bnapi)
11609                return;
11610
11611        for (i = 0; i < bp->cp_nr_rings; i++) {
11612                int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11613
11614                irq = &bp->irq_tbl[map_idx];
11615                if (irq->requested) {
11616                        if (irq->have_cpumask) {
11617                                irq_update_affinity_hint(irq->vector, NULL);
11618                                free_cpumask_var(irq->cpu_mask);
11619                                irq->have_cpumask = 0;
11620                        }
11621
11622                        bnxt_release_irq_notifier(irq);
11623
11624                        free_irq(irq->vector, bp->bnapi[i]);
11625                }
11626
11627                irq->requested = 0;
11628        }
11629
11630        /* Disable TPH support */
11631        pcie_disable_tph(bp->pdev);
11632        bp->tph_mode = 0;
11633}
11634
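/* Set up the interrupt mode, enable TPH if available, and request one
 * IRQ per completion ring.  Each IRQ gets an affinity hint spread
 * across the device's NUMA node and, in TPH mode, an affinity notifier
 * and an initial steering tag entry.
 */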
11635static int bnxt_request_irq(struct bnxt *bp)
11636{
11637        struct cpu_rmap *rmap = NULL;
11638        int i, j, rc = 0;
11639        unsigned long flags = 0;
11640
11641        rc = bnxt_setup_int_mode(bp);
11642        if (rc) {
11643                netdev_err(bp->dev, "bnxt_setup_int_mode err: %d\n",
11644                           rc);
11645                return rc;
11646        }
11647#ifdef CONFIG_RFS_ACCEL
11648        rmap = bp->dev->rx_cpu_rmap;
11649#endif
11650
11651        /* Enable TPH support as part of IRQ request */
11652        rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11653        if (!rc)
11654                bp->tph_mode = PCI_TPH_ST_IV_MODE;
11655
11656        for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11657                int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11658                struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11659
11660                if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11661                    rmap && bp->bnapi[i]->rx_ring) {
11662                        rc = irq_cpu_rmap_add(rmap, irq->vector);
11663                        if (rc)
11664                                netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11665                                            j);
11666                        j++;
11667                }
11668
11669                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11670                                 bp->bnapi[i]);
11671                if (rc)
11672                        break;
11673
11674                netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11675                irq->requested = 1;
11676
11677                if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11678                        int numa_node = dev_to_node(&bp->pdev->dev);
11679                        u16 tag;
11680
11681                        irq->have_cpumask = 1;
11682                        irq->msix_nr = map_idx;
11683                        irq->ring_nr = i;
11684                        cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11685                                        irq->cpu_mask);
11686                        rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11687                        if (rc) {
11688                                netdev_warn(bp->dev,
11689                                            "Update affinity hint failed, IRQ = %d\n",
11690                                            irq->vector);
11691                                break;
11692                        }
11693
11694                        bnxt_register_irq_notifier(bp, irq);
11695
11696                        /* Init ST table entry */
11697                        if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11698                                                cpumask_first(irq->cpu_mask),
11699                                                &tag))
11700                                continue;
11701
11702                        pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11703                }
11704        }
11705        return rc;
11706}
11707
11708static void bnxt_del_napi(struct bnxt *bp)
11709{
11710        int i;
11711
11712        if (!bp->bnapi)
11713                return;
11714
11715        for (i = 0; i < bp->rx_nr_rings; i++)
11716                netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11717        for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11718                netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11719
11720        for (i = 0; i < bp->cp_nr_rings; i++) {
11721                struct bnxt_napi *bnapi = bp->bnapi[i];
11722
11723                __netif_napi_del_locked(&bnapi->napi);
11724        }
11725        /* Since we called __netif_napi_del_locked(), we need to respect
11726         * an RCU grace period before the napi structures can be freed.
11727         */
11728        synchronize_net();
11729}
11730
11731static void bnxt_init_napi(struct bnxt *bp)
11732{
11733        int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11734        unsigned int cp_nr_rings = bp->cp_nr_rings;
11735        struct bnxt_napi *bnapi;
11736        int i;
11737
11738        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11739                poll_fn = bnxt_poll_p5;
11740        else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11741                cp_nr_rings--;
11742
11743        set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11744
11745        for (i = 0; i < cp_nr_rings; i++) {
11746                bnapi = bp->bnapi[i];
11747                netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11748                                             bnapi->index);
11749        }
11750        if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11751                bnapi = bp->bnapi[cp_nr_rings];
11752                netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11753        }
11754}
11755
11756static void bnxt_disable_napi(struct bnxt *bp)
11757{
11758        int i;
11759
11760        if (!bp->bnapi ||
11761            test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11762                return;
11763
11764        for (i = 0; i < bp->cp_nr_rings; i++) {
11765                struct bnxt_napi *bnapi = bp->bnapi[i];
11766                struct bnxt_cp_ring_info *cpr;
11767
11768                cpr = &bnapi->cp_ring;
11769                if (bnapi->tx_fault)
11770                        cpr->sw_stats->tx.tx_resets++;
11771                if (bnapi->in_reset)
11772                        cpr->sw_stats->rx.rx_resets++;
11773                napi_disable_locked(&bnapi->napi);
11774        }
11775}
11776
11777static void bnxt_enable_napi(struct bnxt *bp)
11778{
11779        int i;
11780
11781        clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11782        for (i = 0; i < bp->cp_nr_rings; i++) {
11783                struct bnxt_napi *bnapi = bp->bnapi[i];
11784                struct bnxt_cp_ring_info *cpr;
11785
11786                bnapi->tx_fault = 0;
11787
11788                cpr = &bnapi->cp_ring;
11789                bnapi->in_reset = false;
11790
11791                if (bnapi->rx_ring) {
11792                        INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11793                        cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11794                }
11795                napi_enable_locked(&bnapi->napi);
11796        }
11797}
11798
11799void bnxt_tx_disable(struct bnxt *bp)
11800{
11801        int i;
11802        struct bnxt_tx_ring_info *txr;
11803
11804        if (bp->tx_ring) {
11805                for (i = 0; i < bp->tx_nr_rings; i++) {
11806                        txr = &bp->tx_ring[i];
11807                        WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11808                }
11809        }
11810        /* Make sure napi polls see @dev_state change */
11811        synchronize_net();
11812        /* Drop carrier first to prevent TX timeout */
11813        netif_carrier_off(bp->dev);
11814        /* Stop all TX queues */
11815        netif_tx_disable(bp->dev);
11816}
11817
11818void bnxt_tx_enable(struct bnxt *bp)
11819{
11820        int i;
11821        struct bnxt_tx_ring_info *txr;
11822
11823        for (i = 0; i < bp->tx_nr_rings; i++) {
11824                txr = &bp->tx_ring[i];
11825                WRITE_ONCE(txr->dev_state, 0);
11826        }
11827        /* Make sure napi polls see @dev_state change */
11828        synchronize_net();
11829        netif_tx_wake_all_queues(bp->dev);
11830        if (BNXT_LINK_IS_UP(bp))
11831                netif_carrier_on(bp->dev);
11832}
11833
11834static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11835{
11836        u8 active_fec = link_info->active_fec_sig_mode &
11837                        PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11838
11839        switch (active_fec) {
11840        default:
11841        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11842                return "None";
11843        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11844                return "Clause 74 BaseR";
11845        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11846                return "Clause 91 RS(528,514)";
11847        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11848                return "Clause 91 RS544_1XN";
11849        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11850                return "Clause 91 RS(544,514)";
11851        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11852                return "Clause 91 RS272_1XN";
11853        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11854                return "Clause 91 RS(272,257)";
11855        }
11856}
11857
11858void bnxt_report_link(struct bnxt *bp)
11859{
11860        if (BNXT_LINK_IS_UP(bp)) {
11861                const char *signal = "";
11862                const char *flow_ctrl;
11863                const char *duplex;
11864                u32 speed;
11865                u16 fec;
11866
11867                netif_carrier_on(bp->dev);
11868                speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11869                if (speed == SPEED_UNKNOWN) {
11870                        netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11871                        return;
11872                }
11873                if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11874                        duplex = "full";
11875                else
11876                        duplex = "half";
11877                if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11878                        flow_ctrl = "ON - receive & transmit";
11879                else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11880                        flow_ctrl = "ON - transmit";
11881                else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11882                        flow_ctrl = "ON - receive";
11883                else
11884                        flow_ctrl = "none";
11885                if (bp->link_info.phy_qcfg_resp.option_flags &
11886                    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11887                        u8 sig_mode = bp->link_info.active_fec_sig_mode &
11888                                      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
11889                        switch (sig_mode) {
11890                        case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
11891                                signal = "(NRZ) ";
11892                                break;
11893                        case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
11894                                signal = "(PAM4 56Gbps) ";
11895                                break;
11896                        case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
11897                                signal = "(PAM4 112Gbps) ";
11898                                break;
11899                        default:
11900                                break;
11901                        }
11902                }
11903                netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
11904                            speed, signal, duplex, flow_ctrl);
11905                if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
11906                        netdev_info(bp->dev, "EEE is %s\n",
11907                                    bp->eee.eee_active ? "active" :
11908                                                         "not active");
11909                fec = bp->link_info.fec_cfg;
11910                if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
11911                        netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
11912                                    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
11913                                    bnxt_report_fec(&bp->link_info));
11914        } else {
11915                netif_carrier_off(bp->dev);
11916                netdev_err(bp->dev, "NIC Link is Down\n");
11917        }
11918}
11919
11920static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
11921{
11922        if (!resp->supported_speeds_auto_mode &&
11923            !resp->supported_speeds_force_mode &&
11924            !resp->supported_pam4_speeds_auto_mode &&
11925            !resp->supported_pam4_speeds_force_mode &&
11926            !resp->supported_speeds2_auto_mode &&
11927            !resp->supported_speeds2_force_mode)
11928                return true;
11929        return false;
11930}
11931
11932static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
11933{
11934        struct bnxt_link_info *link_info = &bp->link_info;
11935        struct hwrm_port_phy_qcaps_output *resp;
11936        struct hwrm_port_phy_qcaps_input *req;
11937        int rc = 0;
11938
11939        if (bp->hwrm_spec_code < 0x10201)
11940                return 0;
11941
11942        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
11943        if (rc)
11944                return rc;
11945
11946        resp = hwrm_req_hold(bp, req);
11947        rc = hwrm_req_send(bp, req);
11948        if (rc)
11949                goto hwrm_phy_qcaps_exit;
11950
11951        bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11952        if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11953                struct ethtool_keee *eee = &bp->eee;
11954                u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11955
11956                _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11957                bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11958                                 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11959                bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11960                                 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11961        }
11962
11963        if (bp->hwrm_spec_code >= 0x10a01) {
11964                if (bnxt_phy_qcaps_no_speed(resp)) {
11965                        link_info->phy_state = BNXT_PHY_STATE_DISABLED;
11966                        netdev_warn(bp->dev, "Ethernet link disabled\n");
11967                } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
11968                        link_info->phy_state = BNXT_PHY_STATE_ENABLED;
11969                        netdev_info(bp->dev, "Ethernet link enabled\n");
11970                        /* Phy re-enabled, reprobe the speeds */
11971                        link_info->support_auto_speeds = 0;
11972                        link_info->support_pam4_auto_speeds = 0;
11973                        link_info->support_auto_speeds2 = 0;
11974                }
11975        }
11976        if (resp->supported_speeds_auto_mode)
11977                link_info->support_auto_speeds =
11978                        le16_to_cpu(resp->supported_speeds_auto_mode);
11979        if (resp->supported_pam4_speeds_auto_mode)
11980                link_info->support_pam4_auto_speeds =
11981                        le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
11982        if (resp->supported_speeds2_auto_mode)
11983                link_info->support_auto_speeds2 =
11984                        le16_to_cpu(resp->supported_speeds2_auto_mode);
11985
11986        bp->port_count = resp->port_cnt;
11987
11988hwrm_phy_qcaps_exit:
11989        hwrm_req_drop(bp, req);
11990        return rc;
11991}
11992
11993static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
11994{
11995        struct hwrm_port_mac_qcaps_output *resp;
11996        struct hwrm_port_mac_qcaps_input *req;
11997        int rc;
11998
11999        if (bp->hwrm_spec_code < 0x10a03)
12000                return;
12001
12002        rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12003        if (rc)
12004                return;
12005
12006        resp = hwrm_req_hold(bp, req);
12007        rc = hwrm_req_send_silent(bp, req);
12008        if (!rc)
12009                bp->mac_flags = resp->flags;
12010        hwrm_req_drop(bp, req);
12011}
12012
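/* Return true if @advertising contains any speed bit that is not set
 * in @supported.  E.g. advertising = 0xf0, supported = 0x70: diff =
 * 0x80, and (supported | diff) != supported, so an advertised speed
 * has been dropped.
 */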
12013static bool bnxt_support_dropped(u16 advertising, u16 supported)
12014{
12015        u16 diff = advertising ^ supported;
12016
12017        return ((supported | diff) != supported);
12018}
12019
12020static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12021{
12022        struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12023
12024        /* Check if any advertised speeds are no longer supported. The caller
12025         * holds the link_lock mutex, so we can modify link_info settings.
12026         */
12027        if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12028                if (bnxt_support_dropped(link_info->advertising,
12029                                         link_info->support_auto_speeds2)) {
12030                        link_info->advertising = link_info->support_auto_speeds2;
12031                        return true;
12032                }
12033                return false;
12034        }
12035        if (bnxt_support_dropped(link_info->advertising,
12036                                 link_info->support_auto_speeds)) {
12037                link_info->advertising = link_info->support_auto_speeds;
12038                return true;
12039        }
12040        if (bnxt_support_dropped(link_info->advertising_pam4,
12041                                 link_info->support_pam4_auto_speeds)) {
12042                link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12043                return true;
12044        }
12045        return false;
12046}
12047
12048int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12049{
12050        struct bnxt_link_info *link_info = &bp->link_info;
12051        struct hwrm_port_phy_qcfg_output *resp;
12052        struct hwrm_port_phy_qcfg_input *req;
12053        u8 link_state = link_info->link_state;
12054        bool support_changed;
12055        int rc;
12056
12057        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12058        if (rc)
12059                return rc;
12060
12061        resp = hwrm_req_hold(bp, req);
12062        rc = hwrm_req_send(bp, req);
12063        if (rc) {
12064                hwrm_req_drop(bp, req);
12065                if (BNXT_VF(bp) && rc == -ENODEV) {
12066                        netdev_warn(bp->dev, "Cannot obtain link state while PF is unavailable.\n");
12067                        rc = 0;
12068                }
12069                return rc;
12070        }
12071
12072        memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12073        link_info->phy_link_status = resp->link;
12074        link_info->duplex = resp->duplex_cfg;
12075        if (bp->hwrm_spec_code >= 0x10800)
12076                link_info->duplex = resp->duplex_state;
12077        link_info->pause = resp->pause;
12078        link_info->auto_mode = resp->auto_mode;
12079        link_info->auto_pause_setting = resp->auto_pause;
12080        link_info->lp_pause = resp->link_partner_adv_pause;
12081        link_info->force_pause_setting = resp->force_pause;
12082        link_info->duplex_setting = resp->duplex_cfg;
12083        if (link_info->phy_link_status == BNXT_LINK_LINK) {
12084                link_info->link_speed = le16_to_cpu(resp->link_speed);
12085                if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12086                        link_info->active_lanes = resp->active_lanes;
12087        } else {
12088                link_info->link_speed = 0;
12089                link_info->active_lanes = 0;
12090        }
12091        link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12092        link_info->force_pam4_link_speed =
12093                le16_to_cpu(resp->force_pam4_link_speed);
12094        link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12095        link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12096        link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12097        link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12098        link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12099        link_info->auto_pam4_link_speeds =
12100                le16_to_cpu(resp->auto_pam4_link_speed_mask);
12101        link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12102        link_info->lp_auto_link_speeds =
12103                le16_to_cpu(resp->link_partner_adv_speeds);
12104        link_info->lp_auto_pam4_link_speeds =
12105                resp->link_partner_pam4_adv_speeds;
12106        link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12107        link_info->phy_ver[0] = resp->phy_maj;
12108        link_info->phy_ver[1] = resp->phy_min;
12109        link_info->phy_ver[2] = resp->phy_bld;
12110        link_info->media_type = resp->media_type;
12111        link_info->phy_type = resp->phy_type;
12112        link_info->transceiver = resp->xcvr_pkg_type;
12113        link_info->phy_addr = resp->eee_config_phy_addr &
12114                              PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12115        link_info->module_status = resp->module_status;
12116
12117        if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12118                struct ethtool_keee *eee = &bp->eee;
12119                u16 fw_speeds;
12120
12121                eee->eee_active = 0;
12122                if (resp->eee_config_phy_addr &
12123                    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12124                        eee->eee_active = 1;
12125                        fw_speeds = le16_to_cpu(
12126                                resp->link_partner_adv_eee_link_speed_mask);
12127                        _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12128                }
12129
12130                /* Pull initial EEE config */
12131                if (!chng_link_state) {
12132                        if (resp->eee_config_phy_addr &
12133                            PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12134                                eee->eee_enabled = 1;
12135
12136                        fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12137                        _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12138
12139                        if (resp->eee_config_phy_addr &
12140                            PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12141                                __le32 tmr;
12142
12143                                eee->tx_lpi_enabled = 1;
12144                                tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12145                                eee->tx_lpi_timer = le32_to_cpu(tmr) &
12146                                        PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12147                        }
12148                }
12149        }
12150
12151        link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12152        if (bp->hwrm_spec_code >= 0x10504) {
12153                link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12154                link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12155        }
12156        /* TODO: need to add more logic to report VF link */
12157        if (chng_link_state) {
12158                if (link_info->phy_link_status == BNXT_LINK_LINK)
12159                        link_info->link_state = BNXT_LINK_STATE_UP;
12160                else
12161                        link_info->link_state = BNXT_LINK_STATE_DOWN;
12162                if (link_state != link_info->link_state)
12163                        bnxt_report_link(bp);
12164        } else {
12165                /* always link down if not require to update link state */
12166                link_info->link_state = BNXT_LINK_STATE_DOWN;
12167        }
12168        hwrm_req_drop(bp, req);
12169
12170        if (!BNXT_PHY_CFG_ABLE(bp))
12171                return 0;
12172
12173        support_changed = bnxt_support_speed_dropped(link_info);
12174        if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12175                bnxt_hwrm_set_link_setting(bp, true, false);
12176        return 0;
12177}
12178
12179static void bnxt_get_port_module_status(struct bnxt *bp)
12180{
12181        struct bnxt_link_info *link_info = &bp->link_info;
12182        struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12183        u8 module_status;
12184
12185        if (bnxt_update_link(bp, true))
12186                return;
12187
12188        module_status = link_info->module_status;
12189        switch (module_status) {
12190        case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12191        case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12192        case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12193                netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12194                            bp->pf.port_id);
12195                if (bp->hwrm_spec_code >= 0x10201) {
12196                        netdev_warn(bp->dev, "Module part number %s\n",
12197                                    resp->phy_vendor_partnumber);
12198                }
12199                if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12200                        netdev_warn(bp->dev, "TX is disabled\n");
12201                if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12202                        netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12203        }
12204}
12205
12206static void
12207bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12208{
12209        if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12210                if (bp->hwrm_spec_code >= 0x10201)
12211                        req->auto_pause =
12212                                PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12213                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12214                        req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12215                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12216                        req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12217                req->enables |=
12218                        cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12219        } else {
12220                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12221                        req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12222                if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12223                        req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12224                req->enables |=
12225                        cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12226                if (bp->hwrm_spec_code >= 0x10201) {
12227                        req->auto_pause = req->force_pause;
12228                        req->enables |= cpu_to_le32(
12229                                PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12230                }
12231        }
12232}
12233
12234static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12235{
12236        if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12237                req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12238                if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12239                        req->enables |=
12240                                cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12241                        req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12242                } else if (bp->link_info.advertising) {
12243                        req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12244                        req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12245                }
12246                if (bp->link_info.advertising_pam4) {
12247                        req->enables |=
12248                                cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12249                        req->auto_link_pam4_speed_mask =
12250                                cpu_to_le16(bp->link_info.advertising_pam4);
12251                }
12252                req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12253                req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12254        } else {
12255                req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12256                if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12257                        req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12258                        req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12259                        netif_info(bp, link, bp->dev, "Forcing FW speed2: %u\n",
12260                                   (u32)bp->link_info.req_link_speed);
12261                } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12262                        req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12263                        req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12264                } else {
12265                        req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12266                }
12267        }
12268
12269        /* tell chimp that the setting takes effect immediately */
12270        req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12271}
12272
12273int bnxt_hwrm_set_pause(struct bnxt *bp)
12274{
12275        struct hwrm_port_phy_cfg_input *req;
12276        int rc;
12277
12278        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12279        if (rc)
12280                return rc;
12281
12282        bnxt_hwrm_set_pause_common(bp, req);
12283
12284        if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12285            bp->link_info.force_link_chng)
12286                bnxt_hwrm_set_link_common(bp, req);
12287
12288        rc = hwrm_req_send(bp, req);
12289        if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12290                /* Since changing the pause setting doesn't trigger any link
12291                 * change event, the driver needs to update the current pause
12292                 * result upon successful return of the phy_cfg command.
12293                 */
12294                bp->link_info.pause =
12295                bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12296                bp->link_info.auto_pause_setting = 0;
12297                if (!bp->link_info.force_link_chng)
12298                        bnxt_report_link(bp);
12299        }
12300        bp->link_info.force_link_chng = false;
12301        return rc;
12302}
12303
12304static void bnxt_hwrm_set_eee(struct bnxt *bp,
12305                              struct hwrm_port_phy_cfg_input *req)
12306{
12307        struct ethtool_keee *eee = &bp->eee;
12308
12309        if (eee->eee_enabled) {
12310                u16 eee_speeds;
12311                u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12312
12313                if (eee->tx_lpi_enabled)
12314                        flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12315                else
12316                        flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12317
12318                req->flags |= cpu_to_le32(flags);
12319                eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12320                req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12321                req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12322        } else {
12323                req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12324        }
12325}
12326
12327int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12328{
12329        struct hwrm_port_phy_cfg_input *req;
12330        int rc;
12331
12332        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12333        if (rc)
12334                return rc;
12335
12336        if (set_pause)
12337                bnxt_hwrm_set_pause_common(bp, req);
12338
12339        bnxt_hwrm_set_link_common(bp, req);
12340
12341        if (set_eee)
12342                bnxt_hwrm_set_eee(bp, req);
12343        return hwrm_req_send(bp, req);
12344}
12345
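/* Force the link down when the device is closed.  Only done on a
 * single-function PF, and skipped when VFs are assigned unless the
 * firmware manages link down for them.
 */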
12346static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12347{
12348        struct hwrm_port_phy_cfg_input *req;
12349        int rc;
12350
12351        if (!BNXT_SINGLE_PF(bp))
12352                return 0;
12353
12354        if (pci_num_vf(bp->pdev) &&
12355            !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12356                return 0;
12357
12358        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12359        if (rc)
12360                return rc;
12361
12362        req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12363        rc = hwrm_req_send(bp, req);
12364        if (!rc) {
12365                mutex_lock(&bp->link_lock);
12366                /* The device is not obliged to bring the link down in certain
12367                 * scenarios, even when forced.  Setting the state to unknown is
12368                 * consistent with driver startup and forces the link state to be
12369                 * reported during a subsequent open based on PORT_PHY_QCFG.
12370                 */
12371                bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12372                mutex_unlock(&bp->link_lock);
12373        }
12374        return rc;
12375}
12376
12377static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12378{
12379#ifdef CONFIG_TEE_BNXT_FW
12380        int rc = tee_bnxt_fw_load();
12381
12382        if (rc)
12383                netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12384
12385        return rc;
12386#else
12387        netdev_err(bp->dev, "OP-TEE not supported\n");
12388        return -ENODEV;
12389#endif
12390}
12391
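/* Poll the firmware health register while the firmware reports itself
 * booting or recovering.  If the firmware crashed with no master
 * function to reset it, fall back to a reset via OP-TEE.
 */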
12392static int bnxt_try_recover_fw(struct bnxt *bp)
12393{
12394        if (bp->fw_health && bp->fw_health->status_reliable) {
12395                int retry = 0, rc;
12396                u32 sts;
12397
12398                do {
12399                        sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12400                        rc = bnxt_hwrm_poll(bp);
12401                        if (!BNXT_FW_IS_BOOTING(sts) &&
12402                            !BNXT_FW_IS_RECOVERING(sts))
12403                                break;
12404                        retry++;
12405                } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12406
12407                if (!BNXT_FW_IS_HEALTHY(sts)) {
12408                        netdev_err(bp->dev,
12409                                   "Firmware not responding, status: 0x%x\n",
12410                                   sts);
12411                        rc = -ENODEV;
12412                }
12413                if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12414                        netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12415                        return bnxt_fw_reset_via_optee(bp);
12416                }
12417                return rc;
12418        }
12419
12420        return -ENODEV;
12421}
12422
12423static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12424{
12425        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12426
12427        if (!BNXT_NEW_RM(bp))
12428                return; /* no resource reservations required */
12429
12430        hw_resc->resv_cp_rings = 0;
12431        hw_resc->resv_stat_ctxs = 0;
12432        hw_resc->resv_irqs = 0;
12433        hw_resc->resv_tx_rings = 0;
12434        hw_resc->resv_rx_rings = 0;
12435        hw_resc->resv_hw_ring_grps = 0;
12436        hw_resc->resv_vnics = 0;
12437        hw_resc->resv_rsscos_ctxs = 0;
12438        if (!fw_reset) {
12439                bp->tx_nr_rings = 0;
12440                bp->rx_nr_rings = 0;
12441        }
12442}
12443
12444int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12445{
12446        int rc;
12447
12448        if (!BNXT_NEW_RM(bp))
12449                return 0; /* no resource reservations required */
12450
12451        rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12452        if (rc)
12453                netdev_err(bp->dev, "resc_qcaps failed\n");
12454
12455        bnxt_clear_reservations(bp, fw_reset);
12456
12457        return rc;
12458}
12459
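/* Notify the firmware that the driver's view of the interface is
 * changing (up or down).  On up, the response flags tell us whether
 * the firmware was reset or resources/capabilities changed while the
 * interface was down; if so, contexts are rebuilt and resource
 * reservations are cancelled before the NIC is brought up.
 */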
12460static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12461{
12462        struct hwrm_func_drv_if_change_output *resp;
12463        struct hwrm_func_drv_if_change_input *req;
12464        bool resc_reinit = false;
12465        bool caps_change = false;
12466        int rc, retry = 0;
12467        bool fw_reset;
12468        u32 flags = 0;
12469
12470        fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12471        bp->fw_reset_state = 0;
12472
12473        if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12474                return 0;
12475
12476        rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12477        if (rc)
12478                return rc;
12479
12480        if (up)
12481                req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12482        resp = hwrm_req_hold(bp, req);
12483
12484        hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12485        while (retry < BNXT_FW_IF_RETRY) {
12486                rc = hwrm_req_send(bp, req);
12487                if (rc != -EAGAIN)
12488                        break;
12489
12490                msleep(50);
12491                retry++;
12492        }
12493
12494        if (rc == -EAGAIN) {
12495                hwrm_req_drop(bp, req);
12496                return rc;
12497        } else if (!rc) {
12498                flags = le32_to_cpu(resp->flags);
12499        } else if (up) {
12500                rc = bnxt_try_recover_fw(bp);
12501                fw_reset = true;
12502        }
12503        hwrm_req_drop(bp, req);
12504        if (rc)
12505                return rc;
12506
12507        if (!up) {
12508                bnxt_inv_fw_health_reg(bp);
12509                return 0;
12510        }
12511
12512        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12513                resc_reinit = true;
12514        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12515            test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12516                fw_reset = true;
12517        else
12518                bnxt_remap_fw_health_regs(bp);
12519
12520        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12521                netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12522                set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12523                return -ENODEV;
12524        }
12525        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12526                caps_change = true;
12527
12528        if (resc_reinit || fw_reset || caps_change) {
12529                if (fw_reset || caps_change) {
12530                        set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12531                        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12532                                bnxt_ulp_irq_stop(bp);
12533                        bnxt_free_ctx_mem(bp, false);
12534                        bnxt_dcb_free(bp);
12535                        rc = bnxt_fw_init_one(bp);
12536                        if (rc) {
12537                                clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12538                                set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12539                                return rc;
12540                        }
12541                        /* IRQ will be initialized later in bnxt_request_irq() */
12542                        bnxt_clear_int_mode(bp);
12543                }
12544                rc = bnxt_cancel_reservations(bp, fw_reset);
12545        }
12546        return rc;
12547}
12548
12549static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12550{
12551        struct hwrm_port_led_qcaps_output *resp;
12552        struct hwrm_port_led_qcaps_input *req;
12553        struct bnxt_pf_info *pf = &bp->pf;
12554        int rc;
12555
12556        bp->num_leds = 0;
12557        if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12558                return 0;
12559
12560        rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12561        if (rc)
12562                return rc;
12563
12564        req->port_id = cpu_to_le16(pf->port_id);
12565        resp = hwrm_req_hold(bp, req);
12566        rc = hwrm_req_send(bp, req);
12567        if (rc) {
12568                hwrm_req_drop(bp, req);
12569                return rc;
12570        }
12571        if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12572                int i;
12573
12574                bp->num_leds = resp->num_leds;
12575                memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12576                                                 bp->num_leds);
12577                for (i = 0; i < bp->num_leds; i++) {
12578                        struct bnxt_led_info *led = &bp->leds[i];
12579                        __le16 caps = led->led_state_caps;
12580
12581                        if (!led->led_group_id ||
12582                            !BNXT_LED_ALT_BLINK_CAP(caps)) {
12583                                bp->num_leds = 0;
12584                                break;
12585                        }
12586                }
12587        }
12588        hwrm_req_drop(bp, req);
12589        return 0;
12590}
12591
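/* Allocate a magic-packet Wake-on-LAN filter in the firmware for the
 * current MAC address and cache the returned filter ID.
 */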
12592int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12593{
12594        struct hwrm_wol_filter_alloc_output *resp;
12595        struct hwrm_wol_filter_alloc_input *req;
12596        int rc;
12597
12598        rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12599        if (rc)
12600                return rc;
12601
12602        req->port_id = cpu_to_le16(bp->pf.port_id);
12603        req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12604        req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12605        memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12606
12607        resp = hwrm_req_hold(bp, req);
12608        rc = hwrm_req_send(bp, req);
12609        if (!rc)
12610                bp->wol_filter_id = resp->wol_filter_id;
12611        hwrm_req_drop(bp, req);
12612        return rc;
12613}
12614
12615int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12616{
12617        struct hwrm_wol_filter_free_input *req;
12618        int rc;
12619
12620        rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12621        if (rc)
12622                return rc;
12623
12624        req->port_id = cpu_to_le16(bp->pf.port_id);
12625        req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12626        req->wol_filter_id = bp->wol_filter_id;
12627
12628        return hwrm_req_send(bp, req);
12629}
12630
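/* Query one WoL filter entry from the firmware.  Returns the next filter
 * handle so the caller can walk the whole list; a magic-packet entry marks
 * WoL as enabled and records its filter ID.
 */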
12631static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12632{
12633        struct hwrm_wol_filter_qcfg_output *resp;
12634        struct hwrm_wol_filter_qcfg_input *req;
12635        u16 next_handle = 0;
12636        int rc;
12637
12638        rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12639        if (rc)
12640                return rc;
12641
12642        req->port_id = cpu_to_le16(bp->pf.port_id);
12643        req->handle = cpu_to_le16(handle);
12644        resp = hwrm_req_hold(bp, req);
12645        rc = hwrm_req_send(bp, req);
12646        if (!rc) {
12647                next_handle = le16_to_cpu(resp->next_handle);
12648                if (next_handle != 0) {
12649                        if (resp->wol_type ==
12650                            WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12651                                bp->wol = 1;
12652                                bp->wol_filter_id = resp->wol_filter_id;
12653                        }
12654                }
12655        }
12656        hwrm_req_drop(bp, req);
12657        return next_handle;
12658}
12659
12660static void bnxt_get_wol_settings(struct bnxt *bp)
12661{
12662        u16 handle = 0;
12663
12664        bp->wol = 0;
12665        if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12666                return;
12667
12668        do {
12669                handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12670        } while (handle && handle != 0xffff);
12671}
12672
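/* Validate the current EEE configuration.  EEE requires autonegotiated
 * speed, and the EEE advertisement must be a subset of the link
 * advertisement; returns false (after fixing up the EEE state) otherwise.
 */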
12673static bool bnxt_eee_config_ok(struct bnxt *bp)
12674{
12675        struct ethtool_keee *eee = &bp->eee;
12676        struct bnxt_link_info *link_info = &bp->link_info;
12677
12678        if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12679                return true;
12680
12681        if (eee->eee_enabled) {
12682                __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12683                __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12684
12685                _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12686
12687                if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12688                        eee->eee_enabled = 0;
12689                        return false;
12690                }
12691                if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12692                        linkmode_and(eee->advertised, advertising,
12693                                     eee->supported);
12694                        return false;
12695                }
12696        }
12697        return true;
12698}
12699
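/* Refresh the link state from the firmware and, on a single-function PF,
 * reprogram the PHY if the requested flow control, speed, duplex, or EEE
 * settings differ from the current ones or the link is down.
 */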
12700static int bnxt_update_phy_setting(struct bnxt *bp)
12701{
12702        int rc;
12703        bool update_link = false;
12704        bool update_pause = false;
12705        bool update_eee = false;
12706        struct bnxt_link_info *link_info = &bp->link_info;
12707
12708        rc = bnxt_update_link(bp, true);
12709        if (rc) {
12710                netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12711                           rc);
12712                return rc;
12713        }
12714        if (!BNXT_SINGLE_PF(bp))
12715                return 0;
12716
12717        if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12718            (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12719            link_info->req_flow_ctrl)
12720                update_pause = true;
12721        if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12722            link_info->force_pause_setting != link_info->req_flow_ctrl)
12723                update_pause = true;
12724        if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12725                if (BNXT_AUTO_MODE(link_info->auto_mode))
12726                        update_link = true;
12727                if (bnxt_force_speed_updated(link_info))
12728                        update_link = true;
12729                if (link_info->req_duplex != link_info->duplex_setting)
12730                        update_link = true;
12731        } else {
12732                if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12733                        update_link = true;
12734                if (bnxt_auto_speed_updated(link_info))
12735                        update_link = true;
12736        }
12737
12738        /* The last close may have shut down the link, so we need to call
12739         * PHY_CFG to bring it back up.
12740         */
12741        if (!BNXT_LINK_IS_UP(bp))
12742                update_link = true;
12743
12744        if (!bnxt_eee_config_ok(bp))
12745                update_eee = true;
12746
12747        if (update_link)
12748                rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12749        else if (update_pause)
12750                rc = bnxt_hwrm_set_pause(bp);
12751        if (rc) {
12752                netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12753                           rc);
12754                return rc;
12755        }
12756
12757        return rc;
12758}
12759
12760static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12761
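/* Try to recover from an aborted state by reinitializing the firmware and
 * the interrupt mode.  Fails if a firmware reset is still in progress or
 * the netdev has been unregistered.
 */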
12762static int bnxt_reinit_after_abort(struct bnxt *bp)
12763{
12764        int rc;
12765
12766        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12767                return -EBUSY;
12768
12769        if (bp->dev->reg_state == NETREG_UNREGISTERED)
12770                return -ENODEV;
12771
12772        rc = bnxt_fw_init_one(bp);
12773        if (!rc) {
12774                bnxt_clear_int_mode(bp);
12775                rc = bnxt_init_int_mode(bp);
12776                if (!rc) {
12777                        clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12778                        set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12779                }
12780        }
12781        return rc;
12782}
12783
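/* Replay one user-configured ntuple or L2 filter to the firmware (e.g.
 * after a reconfiguration); the filter is deleted if the firmware rejects
 * it.
 */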
12784static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12785{
12786        struct bnxt_ntuple_filter *ntp_fltr;
12787        struct bnxt_l2_filter *l2_fltr;
12788
12789        if (list_empty(&fltr->list))
12790                return;
12791
12792        if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12793                ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12794                l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12795                atomic_inc(&l2_fltr->refcnt);
12796                ntp_fltr->l2_fltr = l2_fltr;
12797                if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12798                        bnxt_del_ntp_filter(bp, ntp_fltr);
12799                        netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12800                                   fltr->sw_id);
12801                }
12802        } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12803                l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12804                if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12805                        bnxt_del_l2_filter(bp, l2_fltr);
12806                        netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12807                                   fltr->sw_id);
12808                }
12809        }
12810}
12811
12812static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12813{
12814        struct bnxt_filter_base *usr_fltr, *tmp;
12815
12816        list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12817                bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12818}
12819
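/* Spread the online CPUs (NUMA-locally where possible) across the TX
 * queues of each traffic class and register the resulting masks with XPS.
 */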
12820static int bnxt_set_xps_mapping(struct bnxt *bp)
12821{
12822        int numa_node = dev_to_node(&bp->pdev->dev);
12823        unsigned int q_idx, map_idx, cpu, i;
12824        const struct cpumask *cpu_mask_ptr;
12825        int nr_cpus = num_online_cpus();
12826        cpumask_t *q_map;
12827        int rc = 0;
12828
12829        q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12830        if (!q_map)
12831                return -ENOMEM;
12832
12833        /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12834         * Each TC has the same number of TX queues. The nth TX queue for each
12835         * TC will have the same CPU mask.
12836         */
12837        for (i = 0; i < nr_cpus; i++) {
12838                map_idx = i % bp->tx_nr_rings_per_tc;
12839                cpu = cpumask_local_spread(i, numa_node);
12840                cpu_mask_ptr = get_cpu_mask(cpu);
12841                cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12842        }
12843
12844        /* Register CPU mask for each TX queue except the ones marked for XDP */
12845        for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12846                map_idx = q_idx % bp->tx_nr_rings_per_tc;
12847                rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12848                if (rc) {
12849                        netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12850                                    q_idx);
12851                        break;
12852                }
12853        }
12854
12855        kfree(q_map);
12856
12857        return rc;
12858}
12859
12860static int bnxt_tx_nr_rings(struct bnxt *bp)
12861{
12862        return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12863                            bp->tx_nr_rings_per_tc;
12864}
12865
12866static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12867{
12868        return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12869}
12870
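/* Bring the NIC fully up: reserve rings, allocate memory, set up NAPI and
 * IRQs (when irq_re_init), program the NIC, update PHY settings (when
 * link_re_init), and finally enable interrupts and the TX queues.
 */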
12871static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12872{
12873        int rc = 0;
12874
12875        netif_carrier_off(bp->dev);
12876        if (irq_re_init) {
12877                /* Reserve rings now if none were reserved at driver probe. */
12878                rc = bnxt_init_dflt_ring_mode(bp);
12879                if (rc) {
12880                        netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12881                        return rc;
12882                }
12883        }
12884        rc = bnxt_reserve_rings(bp, irq_re_init);
12885        if (rc)
12886                return rc;
12887
12888        /* Make adjustments if reserved TX rings are fewer than requested */
12889        bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
12890        bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
12891        if (bp->tx_nr_rings_xdp) {
12892                bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
12893                bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12894        }
12895        rc = bnxt_alloc_mem(bp, irq_re_init);
12896        if (rc) {
12897                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12898                goto open_err_free_mem;
12899        }
12900
12901        if (irq_re_init) {
12902                bnxt_init_napi(bp);
12903                rc = bnxt_request_irq(bp);
12904                if (rc) {
12905                        netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
12906                        goto open_err_irq;
12907                }
12908        }
12909
12910        rc = bnxt_init_nic(bp, irq_re_init);
12911        if (rc) {
12912                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12913                goto open_err_irq;
12914        }
12915
12916        bnxt_enable_napi(bp);
12917        bnxt_debug_dev_init(bp);
12918
12919        if (link_re_init) {
12920                mutex_lock(&bp->link_lock);
12921                rc = bnxt_update_phy_setting(bp);
12922                mutex_unlock(&bp->link_lock);
12923                if (rc) {
12924                        netdev_warn(bp->dev, "failed to update phy settings\n");
12925                        if (BNXT_SINGLE_PF(bp)) {
12926                                bp->link_info.phy_retry = true;
12927                                bp->link_info.phy_retry_expires =
12928                                        jiffies + 5 * HZ;
12929                        }
12930                }
12931        }
12932
12933        if (irq_re_init) {
12934                udp_tunnel_nic_reset_ntf(bp->dev);
12935                rc = bnxt_set_xps_mapping(bp);
12936                if (rc)
12937                        netdev_warn(bp->dev, "failed to set xps mapping\n");
12938        }
12939
12940        if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
12941                if (!static_key_enabled(&bnxt_xdp_locking_key))
12942                        static_branch_enable(&bnxt_xdp_locking_key);
12943        } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
12944                static_branch_disable(&bnxt_xdp_locking_key);
12945        }
12946        set_bit(BNXT_STATE_OPEN, &bp->state);
12947        bnxt_enable_int(bp);
12948        /* Enable TX queues */
12949        bnxt_tx_enable(bp);
12950        mod_timer(&bp->timer, jiffies + bp->current_interval);
12951        /* Poll link status and check for SFP+ module status */
12952        mutex_lock(&bp->link_lock);
12953        bnxt_get_port_module_status(bp);
12954        mutex_unlock(&bp->link_lock);
12955
12956        /* VF-reps may need to be re-opened after the PF is re-opened */
12957        if (BNXT_PF(bp))
12958                bnxt_vf_reps_open(bp);
12959        bnxt_ptp_init_rtc(bp, true);
12960        bnxt_ptp_cfg_tstamp_filters(bp);
12961        if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12962                bnxt_hwrm_realloc_rss_ctx_vnic(bp);
12963        bnxt_cfg_usr_fltrs(bp);
12964        return 0;
12965
12966open_err_irq:
12967        bnxt_del_napi(bp);
12968
12969open_err_free_mem:
12970        bnxt_free_skbs(bp);
12971        bnxt_free_irq(bp);
12972        bnxt_free_mem(bp, true);
12973        return rc;
12974}
12975
12976int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12977{
12978        int rc = 0;
12979
12980        if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
12981                rc = -EIO;
12982        if (!rc)
12983                rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
12984        if (rc) {
12985                netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
12986                netif_close(bp->dev);
12987        }
12988        return rc;
12989}
12990
12991/* netdev instance lock held, open the NIC halfway by allocating all
12992 * resources, but NAPI, IRQ, and TX are not enabled.  This is mainly used
12993 * for offline self-tests.
12994 */
12995int bnxt_half_open_nic(struct bnxt *bp)
12996{
12997        int rc = 0;
12998
12999        if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13000                netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13001                rc = -ENODEV;
13002                goto half_open_err;
13003        }
13004
13005        rc = bnxt_alloc_mem(bp, true);
13006        if (rc) {
13007                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13008                goto half_open_err;
13009        }
13010        bnxt_init_napi(bp);
13011        set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13012        rc = bnxt_init_nic(bp, true);
13013        if (rc) {
13014                clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13015                bnxt_del_napi(bp);
13016                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13017                goto half_open_err;
13018        }
13019        return 0;
13020
13021half_open_err:
13022        bnxt_free_skbs(bp);
13023        bnxt_free_mem(bp, true);
13024        netif_close(bp->dev);
13025        return rc;
13026}
13027
13028/* netdev instance lock held, this call can only be made after a previous
13029 * successful call to bnxt_half_open_nic().
13030 */
13031void bnxt_half_close_nic(struct bnxt *bp)
13032{
13033        bnxt_hwrm_resource_free(bp, false, true);
13034        bnxt_del_napi(bp);
13035        bnxt_free_skbs(bp);
13036        bnxt_free_mem(bp, true);
13037        clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13038}
13039
13040void bnxt_reenable_sriov(struct bnxt *bp)
13041{
13042        if (BNXT_PF(bp)) {
13043                struct bnxt_pf_info *pf = &bp->pf;
13044                int n = pf->active_vfs;
13045
13046                if (n)
13047                        bnxt_cfg_hw_sriov(bp, &n, true);
13048        }
13049}
13050
13051static int bnxt_open(struct net_device *dev)
13052{
13053        struct bnxt *bp = netdev_priv(dev);
13054        int rc;
13055
13056        if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13057                rc = bnxt_reinit_after_abort(bp);
13058                if (rc) {
13059                        if (rc == -EBUSY)
13060                                netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13061                        else
13062                                netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13063                        return -ENODEV;
13064                }
13065        }
13066
13067        rc = bnxt_hwrm_if_change(bp, true);
13068        if (rc)
13069                return rc;
13070
13071        rc = __bnxt_open_nic(bp, true, true);
13072        if (rc) {
13073                bnxt_hwrm_if_change(bp, false);
13074        } else {
13075                if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13076                        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13077                                bnxt_queue_sp_work(bp,
13078                                                   BNXT_RESTART_ULP_SP_EVENT);
13079                }
13080        }
13081
13082        return rc;
13083}
13084
13085static bool bnxt_drv_busy(struct bnxt *bp)
13086{
13087        return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13088                test_bit(BNXT_STATE_READ_STATS, &bp->state));
13089}
13090
13091static void bnxt_get_ring_stats(struct bnxt *bp,
13092                                struct rtnl_link_stats64 *stats);
13093
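/* Tear down the data path: stop TX, wait for in-flight stats reads and
 * service tasks to drain, shut down the NIC, and free the rings.  Ring
 * stats are saved first so they persist across the close.
 */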
13094static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13095                             bool link_re_init)
13096{
13097        /* Close the VF-reps before closing PF */
13098        if (BNXT_PF(bp))
13099                bnxt_vf_reps_close(bp);
13100
13101        /* Change device state to avoid TX queue wake-ups */
13102        bnxt_tx_disable(bp);
13103
13104        clear_bit(BNXT_STATE_OPEN, &bp->state);
13105        smp_mb__after_atomic();
13106        while (bnxt_drv_busy(bp))
13107                msleep(20);
13108
13109        if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13110                bnxt_clear_rss_ctxs(bp);
13111        /* Flush rings and disable interrupts */
13112        bnxt_shutdown_nic(bp, irq_re_init);
13113
13114        /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13115
13116        bnxt_debug_dev_exit(bp);
13117        bnxt_disable_napi(bp);
13118        timer_delete_sync(&bp->timer);
13119        bnxt_free_skbs(bp);
13120
13121        /* Save ring stats before shutdown */
13122        if (bp->bnapi && irq_re_init) {
13123                bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13124                bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13125        }
13126        if (irq_re_init) {
13127                bnxt_free_irq(bp);
13128                bnxt_del_napi(bp);
13129        }
13130        bnxt_free_mem(bp, irq_re_init);
13131}
13132
13133void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13134{
13135        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13136                /* If we get here, it means firmware reset is in progress
13137                 * while we are trying to close.  We can safely proceed with
13138                 * the close because we are holding the netdev instance lock.
13139                 * Some firmware messages may fail as we proceed to close.
13140                 * We set the ABORT_ERR flag here so that the FW reset thread
13141                 * will later abort when it gets the netdev instance lock
13142                 * and sees the flag.
13143                 */
13144                netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13145                set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13146        }
13147
13148#ifdef CONFIG_BNXT_SRIOV
13149        if (bp->sriov_cfg) {
13150                int rc;
13151
13152                rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13153                                                      !bp->sriov_cfg,
13154                                                      BNXT_SRIOV_CFG_WAIT_TMO);
13155                if (!rc)
13156                        netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13157                else if (rc < 0)
13158                        netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13159        }
13160#endif
13161        __bnxt_close_nic(bp, irq_re_init, link_re_init);
13162}
13163
13164static int bnxt_close(struct net_device *dev)
13165{
13166        struct bnxt *bp = netdev_priv(dev);
13167
13168        bnxt_close_nic(bp, true, true);
13169        bnxt_hwrm_shutdown_link(bp);
13170        bnxt_hwrm_if_change(bp, false);
13171        return 0;
13172}
13173
13174static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13175                                   u16 *val)
13176{
13177        struct hwrm_port_phy_mdio_read_output *resp;
13178        struct hwrm_port_phy_mdio_read_input *req;
13179        int rc;
13180
13181        if (bp->hwrm_spec_code < 0x10a00)
13182                return -EOPNOTSUPP;
13183
13184        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13185        if (rc)
13186                return rc;
13187
13188        req->port_id = cpu_to_le16(bp->pf.port_id);
13189        req->phy_addr = phy_addr;
13190        req->reg_addr = cpu_to_le16(reg & 0x1f);
13191        if (mdio_phy_id_is_c45(phy_addr)) {
13192                req->cl45_mdio = 1;
13193                req->phy_addr = mdio_phy_id_prtad(phy_addr);
13194                req->dev_addr = mdio_phy_id_devad(phy_addr);
13195                req->reg_addr = cpu_to_le16(reg);
13196        }
13197
13198        resp = hwrm_req_hold(bp, req);
13199        rc = hwrm_req_send(bp, req);
13200        if (!rc)
13201                *val = le16_to_cpu(resp->reg_data);
13202        hwrm_req_drop(bp, req);
13203        return rc;
13204}
13205
13206static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13207                                    u16 val)
13208{
13209        struct hwrm_port_phy_mdio_write_input *req;
13210        int rc;
13211
13212        if (bp->hwrm_spec_code < 0x10a00)
13213                return -EOPNOTSUPP;
13214
13215        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13216        if (rc)
13217                return rc;
13218
13219        req->port_id = cpu_to_le16(bp->pf.port_id);
13220        req->phy_addr = phy_addr;
13221        req->reg_addr = cpu_to_le16(reg & 0x1f);
13222        if (mdio_phy_id_is_c45(phy_addr)) {
13223                req->cl45_mdio = 1;
13224                req->phy_addr = mdio_phy_id_prtad(phy_addr);
13225                req->dev_addr = mdio_phy_id_devad(phy_addr);
13226                req->reg_addr = cpu_to_le16(reg);
13227        }
13228        req->reg_data = cpu_to_le16(val);
13229
13230        return hwrm_req_send(bp, req);
13231}
13232
13233/* netdev instance lock held */
13234static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13235{
13236        struct mii_ioctl_data *mdio = if_mii(ifr);
13237        struct bnxt *bp = netdev_priv(dev);
13238        int rc;
13239
13240        switch (cmd) {
13241        case SIOCGMIIPHY:
13242                mdio->phy_id = bp->link_info.phy_addr;
13243
13244                fallthrough;
13245        case SIOCGMIIREG: {
13246                u16 mii_regval = 0;
13247
13248                if (!netif_running(dev))
13249                        return -EAGAIN;
13250
13251                rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13252                                             &mii_regval);
13253                mdio->val_out = mii_regval;
13254                return rc;
13255        }
13256
13257        case SIOCSMIIREG:
13258                if (!netif_running(dev))
13259                        return -EAGAIN;
13260
13261                return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13262                                                mdio->val_in);
13263
13264        case SIOCSHWTSTAMP:
13265                return bnxt_hwtstamp_set(dev, ifr);
13266
13267        case SIOCGHWTSTAMP:
13268                return bnxt_hwtstamp_get(dev, ifr);
13269
13270        default:
13271                /* do nothing */
13272                break;
13273        }
13274        return -EOPNOTSUPP;
13275}
13276
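/* Sum the per-ring hardware counters into rtnl_link_stats64.  rx_packets
 * and tx_packets accumulate the unicast, multicast, and broadcast counts
 * of every completion ring.
 */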
13277static void bnxt_get_ring_stats(struct bnxt *bp,
13278                                struct rtnl_link_stats64 *stats)
13279{
13280        int i;
13281
13282        for (i = 0; i < bp->cp_nr_rings; i++) {
13283                struct bnxt_napi *bnapi = bp->bnapi[i];
13284                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13285                u64 *sw = cpr->stats.sw_stats;
13286
13287                stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13288                stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13289                stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13290
13291                stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13292                stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13293                stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13294
13295                stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13296                stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13297                stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13298
13299                stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13300                stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13301                stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13302
13303                stats->rx_missed_errors +=
13304                        BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13305
13306                stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13307
13308                stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13309
13310                stats->rx_dropped +=
13311                        cpr->sw_stats->rx.rx_netpoll_discards +
13312                        cpr->sw_stats->rx.rx_oom_discards;
13313        }
13314}
13315
13316static void bnxt_add_prev_stats(struct bnxt *bp,
13317                                struct rtnl_link_stats64 *stats)
13318{
13319        struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13320
13321        stats->rx_packets += prev_stats->rx_packets;
13322        stats->tx_packets += prev_stats->tx_packets;
13323        stats->rx_bytes += prev_stats->rx_bytes;
13324        stats->tx_bytes += prev_stats->tx_bytes;
13325        stats->rx_missed_errors += prev_stats->rx_missed_errors;
13326        stats->multicast += prev_stats->multicast;
13327        stats->rx_dropped += prev_stats->rx_dropped;
13328        stats->tx_dropped += prev_stats->tx_dropped;
13329}
13330
13331static void
13332bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13333{
13334        struct bnxt *bp = netdev_priv(dev);
13335
13336        set_bit(BNXT_STATE_READ_STATS, &bp->state);
13337        /* Make sure bnxt_close_nic() sees that we are reading stats before
13338         * we check the BNXT_STATE_OPEN flag.
13339         */
13340        smp_mb__after_atomic();
13341        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13342                clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13343                *stats = bp->net_stats_prev;
13344                return;
13345        }
13346
13347        bnxt_get_ring_stats(bp, stats);
13348        bnxt_add_prev_stats(bp, stats);
13349
13350        if (bp->flags & BNXT_FLAG_PORT_STATS) {
13351                u64 *rx = bp->port_stats.sw_stats;
13352                u64 *tx = bp->port_stats.sw_stats +
13353                          BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13354
13355                stats->rx_crc_errors =
13356                        BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13357                stats->rx_frame_errors =
13358                        BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13359                stats->rx_length_errors =
13360                        BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13361                        BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13362                        BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13363                stats->rx_errors =
13364                        BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13365                        BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13366                stats->collisions =
13367                        BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13368                stats->tx_fifo_errors =
13369                        BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13370                stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13371        }
13372        clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13373}
13374
13375static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13376                                        struct bnxt_total_ring_err_stats *stats,
13377                                        struct bnxt_cp_ring_info *cpr)
13378{
13379        struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13380        u64 *hw_stats = cpr->stats.sw_stats;
13381
13382        stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13383        stats->rx_total_resets += sw_stats->rx.rx_resets;
13384        stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13385        stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13386        stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13387        stats->rx_total_ring_discards +=
13388                BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13389        stats->tx_total_resets += sw_stats->tx.tx_resets;
13390        stats->tx_total_ring_discards +=
13391                BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13392        stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13393}
13394
13395void bnxt_get_ring_err_stats(struct bnxt *bp,
13396                             struct bnxt_total_ring_err_stats *stats)
13397{
13398        int i;
13399
13400        for (i = 0; i < bp->cp_nr_rings; i++)
13401                bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13402}
13403
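/* Sync the netdev multicast list into the default VNIC, falling back to
 * ALL_MCAST mode when the list exceeds BNXT_MAX_MC_ADDRS.  Returns true if
 * the cached list changed and the firmware needs to be updated.
 */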
13404static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13405{
13406        struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13407        struct net_device *dev = bp->dev;
13408        struct netdev_hw_addr *ha;
13409        u8 *haddr;
13410        int mc_count = 0;
13411        bool update = false;
13412        int off = 0;
13413
13414        netdev_for_each_mc_addr(ha, dev) {
13415                if (mc_count >= BNXT_MAX_MC_ADDRS) {
13416                        *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13417                        vnic->mc_list_count = 0;
13418                        return false;
13419                }
13420                haddr = ha->addr;
13421                if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13422                        memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13423                        update = true;
13424                }
13425                off += ETH_ALEN;
13426                mc_count++;
13427        }
13428        if (mc_count)
13429                *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13430
13431        if (mc_count != vnic->mc_list_count) {
13432                vnic->mc_list_count = mc_count;
13433                update = true;
13434        }
13435        return update;
13436}
13437
13438static bool bnxt_uc_list_updated(struct bnxt *bp)
13439{
13440        struct net_device *dev = bp->dev;
13441        struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13442        struct netdev_hw_addr *ha;
13443        int off = 0;
13444
13445        if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13446                return true;
13447
13448        netdev_for_each_uc_addr(ha, dev) {
13449                if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13450                        return true;
13451
13452                off += ETH_ALEN;
13453        }
13454        return false;
13455}
13456
13457static void bnxt_set_rx_mode(struct net_device *dev)
13458{
13459        struct bnxt *bp = netdev_priv(dev);
13460        struct bnxt_vnic_info *vnic;
13461        bool mc_update = false;
13462        bool uc_update;
13463        u32 mask;
13464
13465        if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13466                return;
13467
13468        vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13469        mask = vnic->rx_mask;
13470        mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13471                  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13472                  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13473                  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13474
13475        if (dev->flags & IFF_PROMISC)
13476                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13477
13478        uc_update = bnxt_uc_list_updated(bp);
13479
13480        if (dev->flags & IFF_BROADCAST)
13481                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13482        if (dev->flags & IFF_ALLMULTI) {
13483                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13484                vnic->mc_list_count = 0;
13485        } else if (dev->flags & IFF_MULTICAST) {
13486                mc_update = bnxt_mc_list_updated(bp, &mask);
13487        }
13488
13489        if (mask != vnic->rx_mask || uc_update || mc_update) {
13490                vnic->rx_mask = mask;
13491
13492                bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13493        }
13494}
13495
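/* Apply the RX mode computed in bnxt_set_rx_mode(): reprogram the unicast
 * L2 filters and send the final mask to the firmware, degrading to
 * promiscuous or ALL_MCAST mode when filter resources run out.
 */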
13496static int bnxt_cfg_rx_mode(struct bnxt *bp)
13497{
13498        struct net_device *dev = bp->dev;
13499        struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13500        struct netdev_hw_addr *ha;
13501        int i, off = 0, rc;
13502        bool uc_update;
13503
13504        netif_addr_lock_bh(dev);
13505        uc_update = bnxt_uc_list_updated(bp);
13506        netif_addr_unlock_bh(dev);
13507
13508        if (!uc_update)
13509                goto skip_uc;
13510
13511        for (i = 1; i < vnic->uc_filter_count; i++) {
13512                struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13513
13514                bnxt_hwrm_l2_filter_free(bp, fltr);
13515                bnxt_del_l2_filter(bp, fltr);
13516        }
13517
13518        vnic->uc_filter_count = 1;
13519
13520        netif_addr_lock_bh(dev);
13521        if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13522                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13523        } else {
13524                netdev_for_each_uc_addr(ha, dev) {
13525                        memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13526                        off += ETH_ALEN;
13527                        vnic->uc_filter_count++;
13528                }
13529        }
13530        netif_addr_unlock_bh(dev);
13531
13532        for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13533                rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13534                if (rc) {
13535                        if (BNXT_VF(bp) && rc == -ENODEV) {
13536                                if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13537                                        netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13538                                else
13539                                        netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13540                                rc = 0;
13541                        } else {
13542                                netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13543                        }
13544                        vnic->uc_filter_count = i;
13545                        return rc;
13546                }
13547        }
13548        if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13549                netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13550
13551skip_uc:
13552        if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13553            !bnxt_promisc_ok(bp))
13554                vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13555        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13556        if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13557                netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13558                            rc);
13559                vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13560                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13561                vnic->mc_list_count = 0;
13562                rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13563        }
13564        if (rc)
13565                netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13566                           rc);
13567
13568        return rc;
13569}
13570
13571static bool bnxt_can_reserve_rings(struct bnxt *bp)
13572{
13573#ifdef CONFIG_BNXT_SRIOV
13574        if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13575                struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13576
13577                /* No minimum rings were provisioned by the PF.  Don't
13578                 * reserve rings by default when device is down.
13579                 */
13580                if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13581                        return true;
13582
13583                if (!netif_running(bp->dev))
13584                        return false;
13585        }
13586#endif
13587        return true;
13588}
13589
13590/* If the chip and firmware support RFS */
13591static bool bnxt_rfs_supported(struct bnxt *bp)
13592{
13593        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13594                if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13595                        return true;
13596                return false;
13597        }
13598        /* 212 firmware is broken for aRFS */
13599        if (BNXT_FW_MAJ(bp) == 212)
13600                return false;
13601        if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13602                return true;
13603        if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13604                return true;
13605        return false;
13606}
13607
13608/* If runtime conditions support RFS */
13609bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13610{
13611        struct bnxt_hw_rings hwr = {0};
13612        int max_vnics, max_rss_ctxs;
13613
13614        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13615            !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13616                return bnxt_rfs_supported(bp);
13617
13618        if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13619                return false;
13620
13621        hwr.grp = bp->rx_nr_rings;
13622        hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13623        if (new_rss_ctx)
13624                hwr.vnic++;
13625        hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13626        max_vnics = bnxt_get_max_func_vnics(bp);
13627        max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13628
13629        if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13630                if (bp->rx_nr_rings > 1)
13631                        netdev_warn(bp->dev,
13632                                    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13633                                    min(max_rss_ctxs - 1, max_vnics - 1));
13634                return false;
13635        }
13636
13637        if (!BNXT_NEW_RM(bp))
13638                return true;
13639
13640        /* Do not reduce VNIC and RSS ctx reservations.  There is a FW
13641         * issue that will mess up the default VNIC if we reduce the
13642         * reservations.
13643         */
13644        if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13645            hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13646                return true;
13647
13648        bnxt_hwrm_reserve_rings(bp, &hwr);
13649        if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13650            hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13651                return true;
13652
13653        netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13654        hwr.vnic = 1;
13655        hwr.rss_ctx = 0;
13656        bnxt_hwrm_reserve_rings(bp, &hwr);
13657        return false;
13658}
13659
13660static netdev_features_t bnxt_fix_features(struct net_device *dev,
13661                                           netdev_features_t features)
13662{
13663        struct bnxt *bp = netdev_priv(dev);
13664        netdev_features_t vlan_features;
13665
13666        if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13667                features &= ~NETIF_F_NTUPLE;
13668
13669        if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13670                features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13671
13672        if (!(features & NETIF_F_GRO))
13673                features &= ~NETIF_F_GRO_HW;
13674
13675        if (features & NETIF_F_GRO_HW)
13676                features &= ~NETIF_F_LRO;
13677
13678        /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13679         * turned on or off together.
13680         */
13681        vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13682        if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13683                if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13684                        features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13685                else if (vlan_features)
13686                        features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13687        }
13688#ifdef CONFIG_BNXT_SRIOV
13689        if (BNXT_VF(bp) && bp->vf.vlan)
13690                features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13691#endif
13692        return features;
13693}
13694
13695static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13696                                bool link_re_init, u32 flags, bool update_tpa)
13697{
13698        bnxt_close_nic(bp, irq_re_init, link_re_init);
13699        bp->flags = flags;
13700        if (update_tpa)
13701                bnxt_set_ring_params(bp);
13702        return bnxt_open_nic(bp, irq_re_init, link_re_init);
13703}
13704
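/* ndo_set_features handler: translate the netdev feature bits into
 * BNXT_FLAG_* values and decide whether the change needs only a TPA
 * update, a partial reinit, or a full close/open cycle.
 */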
13705static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13706{
13707        bool update_tpa = false, update_ntuple = false;
13708        struct bnxt *bp = netdev_priv(dev);
13709        u32 flags = bp->flags;
13710        u32 changes;
13711        int rc = 0;
13712        bool re_init = false;
13713
13714        flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13715        if (features & NETIF_F_GRO_HW)
13716                flags |= BNXT_FLAG_GRO;
13717        else if (features & NETIF_F_LRO)
13718                flags |= BNXT_FLAG_LRO;
13719
13720        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13721                flags &= ~BNXT_FLAG_TPA;
13722
13723        if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13724                flags |= BNXT_FLAG_STRIP_VLAN;
13725
13726        if (features & NETIF_F_NTUPLE)
13727                flags |= BNXT_FLAG_RFS;
13728        else
13729                bnxt_clear_usr_fltrs(bp, true);
13730
13731        changes = flags ^ bp->flags;
13732        if (changes & BNXT_FLAG_TPA) {
13733                update_tpa = true;
13734                if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13735                    (flags & BNXT_FLAG_TPA) == 0 ||
13736                    (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13737                        re_init = true;
13738        }
13739
13740        if (changes & ~BNXT_FLAG_TPA)
13741                re_init = true;
13742
13743        if (changes & BNXT_FLAG_RFS)
13744                update_ntuple = true;
13745
13746        if (flags != bp->flags) {
13747                u32 old_flags = bp->flags;
13748
13749                if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13750                        bp->flags = flags;
13751                        if (update_tpa)
13752                                bnxt_set_ring_params(bp);
13753                        return rc;
13754                }
13755
13756                if (update_ntuple)
13757                        return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13758
13759                if (re_init)
13760                        return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13761
13762                if (update_tpa) {
13763                        bp->flags = flags;
13764                        rc = bnxt_set_tpa(bp,
13765                                          (flags & BNXT_FLAG_TPA) ?
13766                                          true : false);
13767                        if (rc)
13768                                bp->flags = old_flags;
13769                }
13770        }
13771        return rc;
13772}
13773
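/* Walk the IPv6 extension-header chain starting at nw_off and reject
 * packets the hardware cannot offload: fragment headers, headers longer
 * than 64 bytes, or too many extension headers.  A big-TCP hop-by-hop
 * jumbo header is not counted because it is stripped before transmission.
 */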
13774static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13775                              u8 **nextp)
13776{
13777        struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13778        struct hop_jumbo_hdr *jhdr;
13779        int hdr_count = 0;
13780        u8 *nexthdr;
13781        int start;
13782
13783        /* Check that there are at most 2 IPv6 extension headers, no
13784         * fragment header, and each is <= 64 bytes.
13785         */
13786        start = nw_off + sizeof(*ip6h);
13787        nexthdr = &ip6h->nexthdr;
13788        while (ipv6_ext_hdr(*nexthdr)) {
13789                struct ipv6_opt_hdr *hp;
13790                int hdrlen;
13791
13792                if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13793                    *nexthdr == NEXTHDR_FRAGMENT)
13794                        return false;
13795                hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13796                                          skb_headlen(skb), NULL);
13797                if (!hp)
13798                        return false;
13799                if (*nexthdr == NEXTHDR_AUTH)
13800                        hdrlen = ipv6_authlen(hp);
13801                else
13802                        hdrlen = ipv6_optlen(hp);
13803
13804                if (hdrlen > 64)
13805                        return false;
13806
13807                /* The ext header may be a hop-by-hop header inserted for
13808                 * big TCP purposes. This will be removed before sending
13809                 * from the NIC, so do not count it.
13810                 */
13811                if (*nexthdr == NEXTHDR_HOP) {
13812                        if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13813                                goto increment_hdr;
13814
13815                        jhdr = (struct hop_jumbo_hdr *)hp;
13816                        if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13817                            jhdr->nexthdr != IPPROTO_TCP)
13818                                goto increment_hdr;
13819
13820                        goto next_hdr;
13821                }
13822increment_hdr:
13823                hdr_count++;
13824next_hdr:
13825                nexthdr = &hp->nexthdr;
13826                start += hdrlen;
13827        }
13828        if (nextp) {
13829                /* Caller will check inner protocol */
13830                if (skb->encapsulation) {
13831                        *nextp = nexthdr;
13832                        return true;
13833                }
13834                *nextp = NULL;
13835        }
13836        /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13837        return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13838}
13839
13840/* For UDP, we can only handle 1 VXLAN port and 1 GENEVE port. */
13841static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13842{
13843        struct udphdr *uh = udp_hdr(skb);
13844        __be16 udp_port = uh->dest;
13845
13846        if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13847            udp_port != bp->vxlan_gpe_port)
13848                return false;
13849        if (skb->inner_protocol == htons(ETH_P_TEB)) {
13850                struct ethhdr *eh = inner_eth_hdr(skb);
13851
13852                switch (eh->h_proto) {
13853                case htons(ETH_P_IP):
13854                        return true;
13855                case htons(ETH_P_IPV6):
13856                        return bnxt_exthdr_check(bp, skb,
13857                                                 skb_inner_network_offset(skb),
13858                                                 NULL);
13859                }
13860        } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13861                return true;
13862        } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13863                return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13864                                         NULL);
13865        }
13866        return false;
13867}
13868
13869static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13870{
13871        switch (l4_proto) {
13872        case IPPROTO_UDP:
13873                return bnxt_udp_tunl_check(bp, skb);
13874        case IPPROTO_IPIP:
13875                return true;
13876        case IPPROTO_GRE: {
13877                switch (skb->inner_protocol) {
13878                default:
13879                        return false;
13880                case htons(ETH_P_IP):
13881                        return true;
13882                case htons(ETH_P_IPV6):
13883                        fallthrough;
13884                }
13885        }
13886        case IPPROTO_IPV6:
13887                /* Check ext headers of inner ipv6 */
13888                return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13889                                         NULL);
13890        }
13891        return false;
13892}
13893
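/* Strip the checksum and GSO offload flags from packets the hardware
 * cannot handle, such as unsupported tunnel types or IPv6 packets with
 * problematic extension headers.
 */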
13894static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13895                                             struct net_device *dev,
13896                                             netdev_features_t features)
13897{
13898        struct bnxt *bp = netdev_priv(dev);
13899        u8 *l4_proto;
13900
13901        features = vlan_features_check(skb, features);
13902        switch (vlan_get_protocol(skb)) {
13903        case htons(ETH_P_IP):
13904                if (!skb->encapsulation)
13905                        return features;
13906                l4_proto = &ip_hdr(skb)->protocol;
13907                if (bnxt_tunl_check(bp, skb, *l4_proto))
13908                        return features;
13909                break;
13910        case htons(ETH_P_IPV6):
13911                if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
13912                                       &l4_proto))
13913                        break;
13914                if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
13915                        return features;
13916                break;
13917        }
13918        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13919}
13920
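/* Read num_words 32-bit words of chip register space through the
 * firmware's DBG_READ_DIRECT channel, using a DMA slice of the request
 * buffer for the returned data.
 */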
13921int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
13922                         u32 *reg_buf)
13923{
13924        struct hwrm_dbg_read_direct_output *resp;
13925        struct hwrm_dbg_read_direct_input *req;
13926        __le32 *dbg_reg_buf;
13927        dma_addr_t mapping;
13928        int rc, i;
13929
13930        rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
13931        if (rc)
13932                return rc;
13933
13934        dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
13935                                         &mapping);
13936        if (!dbg_reg_buf) {
13937                rc = -ENOMEM;
13938                goto dbg_rd_reg_exit;
13939        }
13940
13941        req->host_dest_addr = cpu_to_le64(mapping);
13942
13943        resp = hwrm_req_hold(bp, req);
13944        req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
13945        req->read_len32 = cpu_to_le32(num_words);
13946
13947        rc = hwrm_req_send(bp, req);
13948        if (rc || resp->error_code) {
13949                rc = -EIO;
13950                goto dbg_rd_reg_exit;
13951        }
13952        for (i = 0; i < num_words; i++)
13953                reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
13954
13955dbg_rd_reg_exit:
13956        hwrm_req_drop(bp, req);
13957        return rc;
13958}
13959
13960static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
13961                                       u32 ring_id, u32 *prod, u32 *cons)
13962{
13963        struct hwrm_dbg_ring_info_get_output *resp;
13964        struct hwrm_dbg_ring_info_get_input *req;
13965        int rc;
13966
13967        rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13968        if (rc)
13969                return rc;
13970
13971        req->ring_type = ring_type;
13972        req->fw_ring_id = cpu_to_le32(ring_id);
13973        resp = hwrm_req_hold(bp, req);
13974        rc = hwrm_req_send(bp, req);
13975        if (!rc) {
13976                *prod = le32_to_cpu(resp->producer_index);
13977                *cons = le32_to_cpu(resp->consumer_index);
13978        }
13979        hwrm_req_drop(bp, req);
13980        return rc;
13981}
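
/* Both debug helpers above follow the HWRM request lifecycle used
 * throughout this file.  Schematically (XYZ stands for any command;
 * error handling elided):
 *
 *	rc = hwrm_req_init(bp, req, HWRM_XYZ);	// allocate and prefill req
 *	...fill request fields...
 *	resp = hwrm_req_hold(bp, req);		// pin the response buffer
 *	rc = hwrm_req_send(bp, req);		// issue and wait
 *	...read response fields while held...
 *	hwrm_req_drop(bp, req);			// release req and resp
 *
 * hwrm_req_hold()/hwrm_req_drop() are only needed when the response is
 * read; fire-and-forget requests can be sent without holding.
 */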
13982
13983static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13984{
13985        struct bnxt_tx_ring_info *txr;
13986        int i = bnapi->index, j;
13987
13988        bnxt_for_each_napi_tx(j, bnapi, txr)
13989                netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
13990                            i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
13991                            txr->tx_cons);
13992}
13993
13994static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
13995{
13996        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
13997        int i = bnapi->index;
13998
13999        if (!rxr)
14000                return;
14001
14002        netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14003                    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14004                    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14005                    rxr->rx_sw_agg_prod);
14006}
14007
14008static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14009{
14010        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
14011        int i = bnapi->index;
14012
14013        netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14014                    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14015}
14016
14017static void bnxt_dbg_dump_states(struct bnxt *bp)
14018{
14019        int i;
14020        struct bnxt_napi *bnapi;
14021
14022        for (i = 0; i < bp->cp_nr_rings; i++) {
14023                bnapi = bp->bnapi[i];
14024                if (netif_msg_drv(bp)) {
14025                        bnxt_dump_tx_sw_state(bnapi);
14026                        bnxt_dump_rx_sw_state(bnapi);
14027                        bnxt_dump_cp_sw_state(bnapi);
14028                }
14029        }
14030}
14031
14032static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14033{
14034        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14035        struct hwrm_ring_reset_input *req;
14036        struct bnxt_napi *bnapi = rxr->bnapi;
14037        struct bnxt_cp_ring_info *cpr;
14038        u16 cp_ring_id;
14039        int rc;
14040
14041        rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14042        if (rc)
14043                return rc;
14044
14045        cpr = &bnapi->cp_ring;
14046        cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14047        req->cmpl_ring = cpu_to_le16(cp_ring_id);
14048        req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14049        req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14050        return hwrm_req_send_silent(bp, req);
14051}
14052
14053static void bnxt_reset_task(struct bnxt *bp, bool silent)
14054{
14055        if (!silent)
14056                bnxt_dbg_dump_states(bp);
14057        if (netif_running(bp->dev)) {
14058                bnxt_close_nic(bp, !silent, false);
14059                bnxt_open_nic(bp, !silent, false);
14060        }
14061}
14062
14063static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14064{
14065        struct bnxt *bp = netdev_priv(dev);
14066
14067        netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14068        bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14069}
14070
14071static void bnxt_fw_health_check(struct bnxt *bp)
14072{
14073        struct bnxt_fw_health *fw_health = bp->fw_health;
14074        struct pci_dev *pdev = bp->pdev;
14075        u32 val;
14076
14077        if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14078                return;
14079
14080        /* Make sure it is enabled before checking the tmr_counter. */
14081        smp_rmb();
14082        if (fw_health->tmr_counter) {
14083                fw_health->tmr_counter--;
14084                return;
14085        }
14086
14087        val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14088        if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14089                fw_health->arrests++;
14090                goto fw_reset;
14091        }
14092
14093        fw_health->last_fw_heartbeat = val;
14094
14095        val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14096        if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14097                fw_health->discoveries++;
14098                goto fw_reset;
14099        }
14100
14101        fw_health->tmr_counter = fw_health->tmr_multiplier;
14102        return;
14103
14104fw_reset:
14105        bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14106}
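
/* Timing note with illustrative numbers: bnxt_timer() invokes this check
 * once per bp->current_interval.  With tmr_multiplier = 2, for example,
 * the countdown above means the heartbeat and reset-count registers are
 * only sampled on every second timer tick; the real multiplier is derived
 * from the polling interval advertised by the firmware.
 */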
14107
14108static void bnxt_timer(struct timer_list *t)
14109{
14110        struct bnxt *bp = timer_container_of(bp, t, timer);
14111        struct net_device *dev = bp->dev;
14112
14113        if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14114                return;
14115
14116        if (atomic_read(&bp->intr_sem) != 0)
14117                goto bnxt_restart_timer;
14118
14119        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14120                bnxt_fw_health_check(bp);
14121
14122        if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14123                bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14124
14125        if (bnxt_tc_flower_enabled(bp))
14126                bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14127
14128#ifdef CONFIG_RFS_ACCEL
14129        if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14130                bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14131#endif /* CONFIG_RFS_ACCEL */
14132
14133        if (bp->link_info.phy_retry) {
14134                if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14135                        bp->link_info.phy_retry = false;
14136                        netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14137                } else {
14138                        bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14139                }
14140        }
14141
14142        if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14143                bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14144
14145        if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14146                bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14147
14148bnxt_restart_timer:
14149        mod_timer(&bp->timer, jiffies + bp->current_interval);
14150}
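
/* Generic sketch of the self-rearming timer pattern used above ("struct
 * my_dev" is hypothetical, not bnxt code): the callback recovers its
 * container with timer_container_of() and re-queues itself.
 *
 *	struct my_dev {
 *		struct timer_list timer;
 *		unsigned long interval;
 *	};
 *
 *	static void my_timer_fn(struct timer_list *t)
 *	{
 *		struct my_dev *md = timer_container_of(md, t, timer);
 *
 *		// ...periodic work...
 *		mod_timer(&md->timer, jiffies + md->interval);
 *	}
 *
 * bnxt_timer() additionally returns early, without re-arming, once the
 * device is no longer open.
 */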
14151
14152static void bnxt_lock_sp(struct bnxt *bp)
14153{
14154        /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14155         * set.  If the device is being closed, bnxt_close() may be holding
14156         * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14157         * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14158         * instance lock.
14159         */
14160        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14161        netdev_lock(bp->dev);
14162}
14163
14164static void bnxt_unlock_sp(struct bnxt *bp)
14165{
14166        set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14167        netdev_unlock(bp->dev);
14168}
14169
14170/* Only called from bnxt_sp_task() */
14171static void bnxt_reset(struct bnxt *bp, bool silent)
14172{
14173        bnxt_lock_sp(bp);
14174        if (test_bit(BNXT_STATE_OPEN, &bp->state))
14175                bnxt_reset_task(bp, silent);
14176        bnxt_unlock_sp(bp);
14177}
14178
14179/* Only called from bnxt_sp_task() */
14180static void bnxt_rx_ring_reset(struct bnxt *bp)
14181{
14182        int i;
14183
14184        bnxt_lock_sp(bp);
14185        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14186                bnxt_unlock_sp(bp);
14187                return;
14188        }
14189        /* Disable and flush TPA before resetting the RX ring */
14190        if (bp->flags & BNXT_FLAG_TPA)
14191                bnxt_set_tpa(bp, false);
14192        for (i = 0; i < bp->rx_nr_rings; i++) {
14193                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14194                struct bnxt_cp_ring_info *cpr;
14195                int rc;
14196
14197                if (!rxr->bnapi->in_reset)
14198                        continue;
14199
14200                rc = bnxt_hwrm_rx_ring_reset(bp, i);
14201                if (rc) {
14202                        if (rc == -EINVAL || rc == -EOPNOTSUPP)
14203                                netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14204                        else
14205                                netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14206                                            rc);
14207                        bnxt_reset_task(bp, true);
14208                        break;
14209                }
14210                bnxt_free_one_rx_ring_skbs(bp, rxr);
14211                rxr->rx_prod = 0;
14212                rxr->rx_agg_prod = 0;
14213                rxr->rx_sw_agg_prod = 0;
14214                rxr->rx_next_cons = 0;
14215                rxr->bnapi->in_reset = false;
14216                bnxt_alloc_one_rx_ring(bp, i);
14217                cpr = &rxr->bnapi->cp_ring;
14218                cpr->sw_stats->rx.rx_resets++;
14219                if (bp->flags & BNXT_FLAG_AGG_RINGS)
14220                        bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14221                bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14222        }
14223        if (bp->flags & BNXT_FLAG_TPA)
14224                bnxt_set_tpa(bp, true);
14225        bnxt_unlock_sp(bp);
14226}
14227
14228static void bnxt_fw_fatal_close(struct bnxt *bp)
14229{
14230        bnxt_tx_disable(bp);
14231        bnxt_disable_napi(bp);
14232        bnxt_disable_int_sync(bp);
14233        bnxt_free_irq(bp);
14234        bnxt_clear_int_mode(bp);
14235        pci_disable_device(bp->pdev);
14236}
14237
14238static void bnxt_fw_reset_close(struct bnxt *bp)
14239{
14240        /* When firmware is in fatal state, quiesce device and disable
14241         * bus master to prevent any potential bad DMAs before freeing
14242         * kernel memory.
14243         */
14244        if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14245                u16 val = 0;
14246
14247                pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14248                if (val == 0xffff)
14249                        bp->fw_reset_min_dsecs = 0;
14250                bnxt_fw_fatal_close(bp);
14251        }
14252        __bnxt_close_nic(bp, true, false);
14253        bnxt_vf_reps_free(bp);
14254        bnxt_clear_int_mode(bp);
14255        bnxt_hwrm_func_drv_unrgtr(bp);
14256        if (pci_is_enabled(bp->pdev))
14257                pci_disable_device(bp->pdev);
14258        bnxt_free_ctx_mem(bp, false);
14259}
14260
14261static bool is_bnxt_fw_ok(struct bnxt *bp)
14262{
14263        struct bnxt_fw_health *fw_health = bp->fw_health;
14264        bool no_heartbeat = false, has_reset = false;
14265        u32 val;
14266
14267        val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14268        if (val == fw_health->last_fw_heartbeat)
14269                no_heartbeat = true;
14270
14271        val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14272        if (val != fw_health->last_fw_reset_cnt)
14273                has_reset = true;
14274
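             /* The firmware is deemed to have recovered on its own only if
              * its heartbeat is still advancing and its reset counter shows
              * a completed self-reset.
              */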
14275        if (!no_heartbeat && has_reset)
14276                return true;
14277
14278        return false;
14279}
14280
14281/* netdev instance lock is acquired before calling this function */
14282static void bnxt_force_fw_reset(struct bnxt *bp)
14283{
14284        struct bnxt_fw_health *fw_health = bp->fw_health;
14285        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14286        u32 wait_dsecs;
14287
14288        if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14289            test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14290                return;
14291
14292        /* we have to serialize with bnxt_refclk_read() */
14293        if (ptp) {
14294                unsigned long flags;
14295
14296                write_seqlock_irqsave(&ptp->ptp_lock, flags);
14297                set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14298                write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14299        } else {
14300                set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14301        }
14302        bnxt_fw_reset_close(bp);
14303        wait_dsecs = fw_health->master_func_wait_dsecs;
14304        if (fw_health->primary) {
14305                if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14306                        wait_dsecs = 0;
14307                bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14308        } else {
14309                bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14310                wait_dsecs = fw_health->normal_func_wait_dsecs;
14311                bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14312        }
14313
14314        bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14315        bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14316        bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14317}
14318
14319void bnxt_fw_exception(struct bnxt *bp)
14320{
14321        netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14322        set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14323        bnxt_ulp_stop(bp);
14324        bnxt_lock_sp(bp);
14325        bnxt_force_fw_reset(bp);
14326        bnxt_unlock_sp(bp);
14327}
14328
14329/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14330 * < 0 on error.
14331 */
14332static int bnxt_get_registered_vfs(struct bnxt *bp)
14333{
14334#ifdef CONFIG_BNXT_SRIOV
14335        int rc;
14336
14337        if (!BNXT_PF(bp))
14338                return 0;
14339
14340        rc = bnxt_hwrm_func_qcfg(bp);
14341        if (rc) {
14342                netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14343                return rc;
14344        }
14345        if (bp->pf.registered_vfs)
14346                return bp->pf.registered_vfs;
14347        if (bp->sriov_cfg)
14348                return 1;
14349#endif
14350        return 0;
14351}
14352
14353void bnxt_fw_reset(struct bnxt *bp)
14354{
14355        bnxt_ulp_stop(bp);
14356        bnxt_lock_sp(bp);
14357        if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14358            !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14359                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14360                int n = 0, tmo;
14361
14362                /* we have to serialize with bnxt_refclk_read() */
14363                if (ptp) {
14364                        unsigned long flags;
14365
14366                        write_seqlock_irqsave(&ptp->ptp_lock, flags);
14367                        set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14368                        write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14369                } else {
14370                        set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14371                }
14372                if (bp->pf.active_vfs &&
14373                    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14374                        n = bnxt_get_registered_vfs(bp);
14375                if (n < 0) {
14376                        netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14377                                   n);
14378                        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14379                        netif_close(bp->dev);
14380                        goto fw_reset_exit;
14381                } else if (n > 0) {
14382                        u16 vf_tmo_dsecs = n * 10;
14383
14384                        if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14385                                bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14386                        bp->fw_reset_state =
14387                                BNXT_FW_RESET_STATE_POLL_VF;
14388                        bnxt_queue_fw_reset_work(bp, HZ / 10);
14389                        goto fw_reset_exit;
14390                }
14391                bnxt_fw_reset_close(bp);
14392                if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14393                        bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14394                        tmo = HZ / 10;
14395                } else {
14396                        bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14397                        tmo = bp->fw_reset_min_dsecs * HZ / 10;
14398                }
14399                bnxt_queue_fw_reset_work(bp, tmo);
14400        }
14401fw_reset_exit:
14402        bnxt_unlock_sp(bp);
14403}
14404
14405static void bnxt_chk_missed_irq(struct bnxt *bp)
14406{
14407        int i;
14408
14409        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14410                return;
14411
14412        for (i = 0; i < bp->cp_nr_rings; i++) {
14413                struct bnxt_napi *bnapi = bp->bnapi[i];
14414                struct bnxt_cp_ring_info *cpr;
14415                u32 fw_ring_id;
14416                int j;
14417
14418                if (!bnapi)
14419                        continue;
14420
14421                cpr = &bnapi->cp_ring;
14422                for (j = 0; j < cpr->cp_ring_count; j++) {
14423                        struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14424                        u32 val[2];
14425
14426                        if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14427                                continue;
14428
14429                        if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14430                                cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14431                                continue;
14432                        }
14433                        fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14434                        bnxt_dbg_hwrm_ring_info_get(bp,
14435                                DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14436                                fw_ring_id, &val[0], &val[1]);
14437                        cpr->sw_stats->cmn.missed_irqs++;
14438                }
14439        }
14440}
14441
14442static void bnxt_cfg_ntp_filters(struct bnxt *);
14443
14444static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14445{
14446        struct bnxt_link_info *link_info = &bp->link_info;
14447
14448        if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14449                link_info->autoneg = BNXT_AUTONEG_SPEED;
14450                if (bp->hwrm_spec_code >= 0x10201) {
14451                        if (link_info->auto_pause_setting &
14452                            PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14453                                link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14454                } else {
14455                        link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14456                }
14457                bnxt_set_auto_speed(link_info);
14458        } else {
14459                bnxt_set_force_speed(link_info);
14460                link_info->req_duplex = link_info->duplex_setting;
14461        }
14462        if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14463                link_info->req_flow_ctrl =
14464                        link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14465        else
14466                link_info->req_flow_ctrl = link_info->force_pause_setting;
14467}
14468
14469static void bnxt_fw_echo_reply(struct bnxt *bp)
14470{
14471        struct bnxt_fw_health *fw_health = bp->fw_health;
14472        struct hwrm_func_echo_response_input *req;
14473        int rc;
14474
14475        rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14476        if (rc)
14477                return;
14478        req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14479        req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14480        hwrm_req_send(bp, req);
14481}
14482
14483static void bnxt_ulp_restart(struct bnxt *bp)
14484{
14485        bnxt_ulp_stop(bp);
14486        bnxt_ulp_start(bp, 0);
14487}
14488
14489static void bnxt_sp_task(struct work_struct *work)
14490{
14491        struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14492
14493        set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14494        smp_mb__after_atomic();
14495        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14496                clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14497                return;
14498        }
14499
14500        if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14501                bnxt_ulp_restart(bp);
14502                bnxt_reenable_sriov(bp);
14503        }
14504
14505        if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14506                bnxt_cfg_rx_mode(bp);
14507
14508        if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14509                bnxt_cfg_ntp_filters(bp);
14510        if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14511                bnxt_hwrm_exec_fwd_req(bp);
14512        if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14513                netdev_info(bp->dev, "Received PF driver unload event!\n");
14514        if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14515                bnxt_hwrm_port_qstats(bp, 0);
14516                bnxt_hwrm_port_qstats_ext(bp, 0);
14517                bnxt_accumulate_all_stats(bp);
14518        }
14519
14520        if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14521                int rc;
14522
14523                mutex_lock(&bp->link_lock);
14524                if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14525                                       &bp->sp_event))
14526                        bnxt_hwrm_phy_qcaps(bp);
14527
14528                rc = bnxt_update_link(bp, true);
14529                if (rc)
14530                        netdev_err(bp->dev, "SP task can't update link (rc: %d)\n",
14531                                   rc);
14532
14533                if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14534                                       &bp->sp_event))
14535                        bnxt_init_ethtool_link_settings(bp);
14536                mutex_unlock(&bp->link_lock);
14537        }
14538        if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14539                int rc;
14540
14541                mutex_lock(&bp->link_lock);
14542                rc = bnxt_update_phy_setting(bp);
14543                mutex_unlock(&bp->link_lock);
14544                if (rc) {
14545                        netdev_warn(bp->dev, "update phy settings retry failed\n");
14546                } else {
14547                        bp->link_info.phy_retry = false;
14548                        netdev_info(bp->dev, "update phy settings retry succeeded\n");
14549                }
14550        }
14551        if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14552                mutex_lock(&bp->link_lock);
14553                bnxt_get_port_module_status(bp);
14554                mutex_unlock(&bp->link_lock);
14555        }
14556
14557        if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14558                bnxt_tc_flow_stats_work(bp);
14559
14560        if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14561                bnxt_chk_missed_irq(bp);
14562
14563        if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14564                bnxt_fw_echo_reply(bp);
14565
14566        if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14567                bnxt_hwmon_notify_event(bp);
14568
14569        /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
14570         * must be the last functions to be called before exiting.
14571         */
14572        if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14573                bnxt_reset(bp, false);
14574
14575        if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14576                bnxt_reset(bp, true);
14577
14578        if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14579                bnxt_rx_ring_reset(bp);
14580
14581        if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14582                if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14583                    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14584                        bnxt_devlink_health_fw_report(bp);
14585                else
14586                        bnxt_fw_reset(bp);
14587        }
14588
14589        if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14590                if (!is_bnxt_fw_ok(bp))
14591                        bnxt_devlink_health_fw_report(bp);
14592        }
14593
14594        smp_mb__before_atomic();
14595        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14596}
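
/* Producer-side sketch (simplified; the real queueing helper also checks
 * driver state before scheduling): every event handled by bnxt_sp_task()
 * is requested by setting a bit and kicking the work item, e.g.
 *
 *	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
 *	schedule_work(&bp->sp_task);
 *
 * The test_and_clear_bit() calls above make each event level-triggered:
 * re-setting a bit while the task runs simply requests another pass.
 */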
14597
14598static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14599                                int *max_cp);
14600
14601/* Under netdev instance lock */
14602int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14603                     int tx_xdp)
14604{
14605        int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14606        struct bnxt_hw_rings hwr = {0};
14607        int rx_rings = rx;
14608        int rc;
14609
14610        if (tcs)
14611                tx_sets = tcs;
14612
14613        _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14614
14615        if (max_rx < rx_rings)
14616                return -ENOMEM;
14617
14618        if (bp->flags & BNXT_FLAG_AGG_RINGS)
14619                rx_rings <<= 1;
14620
14621        hwr.rx = rx_rings;
14622        hwr.tx = tx * tx_sets + tx_xdp;
14623        if (max_tx < hwr.tx)
14624                return -ENOMEM;
14625
14626        hwr.vnic = bnxt_get_total_vnics(bp, rx);
14627
14628        tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14629        hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14630        if (max_cp < hwr.cp)
14631                return -ENOMEM;
14632        hwr.stat = hwr.cp;
14633        if (BNXT_NEW_RM(bp)) {
14634                hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14635                hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14636                hwr.grp = rx;
14637                hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14638        }
14639        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14640                hwr.cp_p5 = hwr.tx + rx;
14641        rc = bnxt_hwrm_check_rings(bp, &hwr);
14642        if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14643                if (!bnxt_ulp_registered(bp->edev)) {
14644                        hwr.cp += bnxt_get_ulp_msix_num(bp);
14645                        hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14646                }
14647                if (hwr.cp > bp->total_irqs) {
14648                        int total_msix = bnxt_change_msix(bp, hwr.cp);
14649
14650                        if (total_msix < hwr.cp) {
14651                                netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14652                                            hwr.cp, total_msix);
14653                                rc = -ENOSPC;
14654                        }
14655                }
14656        }
14657        return rc;
14658}
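
/* Worked example with illustrative numbers: tx = 4, rx = 4, tcs = 2,
 * tx_xdp = 0, sh = true, aggregation rings enabled:
 *
 *	tx_sets = 2		(one TX ring set per traffic class)
 *	hwr.tx	= 4 * 2 = 8
 *	hwr.rx	= 4 << 1 = 8	(each RX ring is paired with an AGG ring)
 *	hwr.cp	= max(tx_cp, 4) shared completion rings, where tx_cp
 *		  collapses TX rings that share a completion ring and is
 *		  chip- and configuration-dependent.
 *
 * The resulting request is then validated against available firmware
 * resources by bnxt_hwrm_check_rings().
 */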
14659
14660static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14661{
14662        if (bp->bar2) {
14663                pci_iounmap(pdev, bp->bar2);
14664                bp->bar2 = NULL;
14665        }
14666
14667        if (bp->bar1) {
14668                pci_iounmap(pdev, bp->bar1);
14669                bp->bar1 = NULL;
14670        }
14671
14672        if (bp->bar0) {
14673                pci_iounmap(pdev, bp->bar0);
14674                bp->bar0 = NULL;
14675        }
14676}
14677
14678static void bnxt_cleanup_pci(struct bnxt *bp)
14679{
14680        bnxt_unmap_bars(bp, bp->pdev);
14681        pci_release_regions(bp->pdev);
14682        if (pci_is_enabled(bp->pdev))
14683                pci_disable_device(bp->pdev);
14684}
14685
14686static void bnxt_init_dflt_coal(struct bnxt *bp)
14687{
14688        struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14689        struct bnxt_coal *coal;
14690        u16 flags = 0;
14691
14692        if (coal_cap->cmpl_params &
14693            RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14694                flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14695
14696        /* Tick values in microseconds.
14697         * 1 coal_buf x bufs_per_record = 1 completion record.
14698         */
14699        coal = &bp->rx_coal;
14700        coal->coal_ticks = 10;
14701        coal->coal_bufs = 30;
14702        coal->coal_ticks_irq = 1;
14703        coal->coal_bufs_irq = 2;
14704        coal->idle_thresh = 50;
14705        coal->bufs_per_record = 2;
14706        coal->budget = 64;              /* NAPI budget */
14707        coal->flags = flags;
14708
14709        coal = &bp->tx_coal;
14710        coal->coal_ticks = 28;
14711        coal->coal_bufs = 30;
14712        coal->coal_ticks_irq = 2;
14713        coal->coal_bufs_irq = 2;
14714        coal->bufs_per_record = 1;
14715        coal->flags = flags;
14716
14717        bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14718}
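
/* Roughly, these defaults are what "ethtool -c <iface>" reports:
 * coal_ticks maps to {rx,tx}-usecs and coal_bufs / bufs_per_record to
 * {rx,tx}-frames, i.e. about rx-usecs 10 / rx-frames 15 and tx-usecs 28 /
 * tx-frames 30 here.  This is an approximate mapping; the exact
 * conversion is done in bnxt_ethtool.c.
 */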
14719
14720/* FW that pre-reserves 1 VNIC per function */
14721static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14722{
14723        u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14724
14725        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14726            (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14727                return true;
14728        if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14729            (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14730                return true;
14731        return false;
14732}
14733
14734static int bnxt_fw_init_one_p1(struct bnxt *bp)
14735{
14736        int rc;
14737
14738        bp->fw_cap = 0;
14739        rc = bnxt_hwrm_ver_get(bp);
14740        /* FW may be unresponsive right after FLR.  FLR must complete within
14741         * 100 msec, so wait that long before continuing with recovery.
14742         */
14743        if (rc)
14744                msleep(100);
14745        bnxt_try_map_fw_health_reg(bp);
14746        if (rc) {
14747                rc = bnxt_try_recover_fw(bp);
14748                if (rc)
14749                        return rc;
14750                rc = bnxt_hwrm_ver_get(bp);
14751                if (rc)
14752                        return rc;
14753        }
14754
14755        bnxt_nvm_cfg_ver_get(bp);
14756
14757        rc = bnxt_hwrm_func_reset(bp);
14758        if (rc)
14759                return -ENODEV;
14760
14761        bnxt_hwrm_fw_set_time(bp);
14762        return 0;
14763}
14764
14765static int bnxt_fw_init_one_p2(struct bnxt *bp)
14766{
14767        int rc;
14768
14769        /* Get the MAX capabilities for this function */
14770        rc = bnxt_hwrm_func_qcaps(bp);
14771        if (rc) {
14772                netdev_err(bp->dev, "hwrm query capability failure rc: %d\n",
14773                           rc);
14774                return -ENODEV;
14775        }
14776
14777        rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14778        if (rc)
14779                netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14780                            rc);
14781
14782        if (bnxt_alloc_fw_health(bp)) {
14783                netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14784        } else {
14785                rc = bnxt_hwrm_error_recovery_qcfg(bp);
14786                if (rc)
14787                        netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14788                                    rc);
14789        }
14790
14791        rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14792        if (rc)
14793                return -ENODEV;
14794
14795        rc = bnxt_alloc_crash_dump_mem(bp);
14796        if (rc)
14797                netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14798                            rc);
14799        if (!rc) {
14800                rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14801                if (rc) {
14802                        bnxt_free_crash_dump_mem(bp);
14803                        netdev_warn(bp->dev,
14804                                    "hwrm crash dump mem failure rc: %d\n", rc);
14805                }
14806        }
14807
14808        if (bnxt_fw_pre_resv_vnics(bp))
14809                bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14810
14811        bnxt_hwrm_func_qcfg(bp);
14812        bnxt_hwrm_vnic_qcaps(bp);
14813        bnxt_hwrm_port_led_qcaps(bp);
14814        bnxt_ethtool_init(bp);
14815        if (bp->fw_cap & BNXT_FW_CAP_PTP)
14816                __bnxt_hwrm_ptp_qcfg(bp);
14817        bnxt_dcb_init(bp);
14818        bnxt_hwmon_init(bp);
14819        return 0;
14820}
14821
14822static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14823{
14824        bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14825        bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14826                           VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14827                           VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14828                           VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14829        if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14830                bp->rss_hash_delta = bp->rss_hash_cfg;
14831        if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14832                bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14833                bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14834                                    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14835        }
14836}
14837
14838static void bnxt_set_dflt_rfs(struct bnxt *bp)
14839{
14840        struct net_device *dev = bp->dev;
14841
14842        dev->hw_features &= ~NETIF_F_NTUPLE;
14843        dev->features &= ~NETIF_F_NTUPLE;
14844        bp->flags &= ~BNXT_FLAG_RFS;
14845        if (bnxt_rfs_supported(bp)) {
14846                dev->hw_features |= NETIF_F_NTUPLE;
14847                if (bnxt_rfs_capable(bp, false)) {
14848                        bp->flags |= BNXT_FLAG_RFS;
14849                        dev->features |= NETIF_F_NTUPLE;
14850                }
14851        }
14852}
14853
14854static void bnxt_fw_init_one_p3(struct bnxt *bp)
14855{
14856        struct pci_dev *pdev = bp->pdev;
14857
14858        bnxt_set_dflt_rss_hash_type(bp);
14859        bnxt_set_dflt_rfs(bp);
14860
14861        bnxt_get_wol_settings(bp);
14862        if (bp->flags & BNXT_FLAG_WOL_CAP)
14863                device_set_wakeup_enable(&pdev->dev, bp->wol);
14864        else
14865                device_set_wakeup_capable(&pdev->dev, false);
14866
14867        bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14868        bnxt_hwrm_coal_params_qcaps(bp);
14869}
14870
14871static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14872
14873int bnxt_fw_init_one(struct bnxt *bp)
14874{
14875        int rc;
14876
14877        rc = bnxt_fw_init_one_p1(bp);
14878        if (rc) {
14879                netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14880                return rc;
14881        }
14882        rc = bnxt_fw_init_one_p2(bp);
14883        if (rc) {
14884                netdev_err(bp->dev, "Firmware init phase 2 failed\n");
14885                return rc;
14886        }
14887        rc = bnxt_probe_phy(bp, false);
14888        if (rc)
14889                return rc;
14890        rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
14891        if (rc)
14892                return rc;
14893
14894        bnxt_fw_init_one_p3(bp);
14895        return 0;
14896}
14897
14898static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
14899{
14900        struct bnxt_fw_health *fw_health = bp->fw_health;
14901        u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
14902        u32 val = fw_health->fw_reset_seq_vals[reg_idx];
14903        u32 reg_type, reg_off, delay_msecs;
14904
14905        delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
14906        reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
14907        reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
14908        switch (reg_type) {
14909        case BNXT_FW_HEALTH_REG_TYPE_CFG:
14910                pci_write_config_dword(bp->pdev, reg_off, val);
14911                break;
14912        case BNXT_FW_HEALTH_REG_TYPE_GRC:
14913                writel(reg_off & BNXT_GRC_BASE_MASK,
14914                       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
14915                reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
14916                fallthrough;
14917        case BNXT_FW_HEALTH_REG_TYPE_BAR0:
14918                writel(val, bp->bar0 + reg_off);
14919                break;
14920        case BNXT_FW_HEALTH_REG_TYPE_BAR1:
14921                writel(val, bp->bar1 + reg_off);
14922                break;
14923        }
14924        if (delay_msecs) {
14925                pci_read_config_dword(bp->pdev, 0, &val);
14926                msleep(delay_msecs);
14927        }
14928}
14929
14930bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
14931{
14932        struct hwrm_func_qcfg_output *resp;
14933        struct hwrm_func_qcfg_input *req;
14934        bool result = true; /* firmware will enforce if unknown */
14935
14936        if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
14937                return result;
14938
14939        if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
14940                return result;
14941
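             /* fid 0xffff addresses the function issuing the request */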
14942        req->fid = cpu_to_le16(0xffff);
14943        resp = hwrm_req_hold(bp, req);
14944        if (!hwrm_req_send(bp, req))
14945                result = !!(le16_to_cpu(resp->flags) &
14946                            FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
14947        hwrm_req_drop(bp, req);
14948        return result;
14949}
14950
14951static void bnxt_reset_all(struct bnxt *bp)
14952{
14953        struct bnxt_fw_health *fw_health = bp->fw_health;
14954        int i, rc;
14955
14956        if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14957                bnxt_fw_reset_via_optee(bp);
14958                bp->fw_reset_timestamp = jiffies;
14959                return;
14960        }
14961
14962        if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
14963                for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
14964                        bnxt_fw_reset_writel(bp, i);
14965        } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
14966                struct hwrm_fw_reset_input *req;
14967
14968                rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
14969                if (!rc) {
14970                        req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
14971                        req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
14972                        req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
14973                        req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
14974                        rc = hwrm_req_send(bp, req);
14975                }
14976                if (rc != -ENODEV)
14977                        netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
14978        }
14979        bp->fw_reset_timestamp = jiffies;
14980}
14981
14982static bool bnxt_fw_reset_timeout(struct bnxt *bp)
14983{
14984        return time_after(jiffies, bp->fw_reset_timestamp +
14985                          (bp->fw_reset_max_dsecs * HZ / 10));
14986}
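
/* Unit note with example numbers: "dsecs" are deciseconds, so the
 * deadline above lies fw_reset_max_dsecs * HZ / 10 jiffies past
 * fw_reset_timestamp; fw_reset_max_dsecs = 60, for instance, gives a
 * 6 second timeout.
 */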
14987
14988static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
14989{
14990        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14991        if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
14992                bnxt_dl_health_fw_status_update(bp, false);
14993        bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
14994        netif_close(bp->dev);
14995}
14996
14997static void bnxt_fw_reset_task(struct work_struct *work)
14998{
14999        struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15000        int rc = 0;
15001
15002        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15003                netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15004                return;
15005        }
15006
15007        switch (bp->fw_reset_state) {
15008        case BNXT_FW_RESET_STATE_POLL_VF: {
15009                int n = bnxt_get_registered_vfs(bp);
15010                int tmo;
15011
15012                if (n < 0) {
15013                        netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15014                                   n, jiffies_to_msecs(jiffies -
15015                                   bp->fw_reset_timestamp));
15016                        goto fw_reset_abort;
15017                } else if (n > 0) {
15018                        if (bnxt_fw_reset_timeout(bp)) {
15019                                clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15020                                bp->fw_reset_state = 0;
15021                                netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15022                                           n);
15023                                goto ulp_start;
15024                        }
15025                        bnxt_queue_fw_reset_work(bp, HZ / 10);
15026                        return;
15027                }
15028                bp->fw_reset_timestamp = jiffies;
15029                netdev_lock(bp->dev);
15030                if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15031                        bnxt_fw_reset_abort(bp, rc);
15032                        netdev_unlock(bp->dev);
15033                        goto ulp_start;
15034                }
15035                bnxt_fw_reset_close(bp);
15036                if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15037                        bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15038                        tmo = HZ / 10;
15039                } else {
15040                        bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15041                        tmo = bp->fw_reset_min_dsecs * HZ / 10;
15042                }
15043                netdev_unlock(bp->dev);
15044                bnxt_queue_fw_reset_work(bp, tmo);
15045                return;
15046        }
15047        case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15048                u32 val;
15049
15050                val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15051                if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15052                    !bnxt_fw_reset_timeout(bp)) {
15053                        bnxt_queue_fw_reset_work(bp, HZ / 5);
15054                        return;
15055                }
15056
15057                if (!bp->fw_health->primary) {
15058                        u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15059
15060                        bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15061                        bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15062                        return;
15063                }
15064                bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15065        }
15066                fallthrough;
15067        case BNXT_FW_RESET_STATE_RESET_FW:
15068                bnxt_reset_all(bp);
15069                bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15070                bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15071                return;
15072        case BNXT_FW_RESET_STATE_ENABLE_DEV:
15073                bnxt_inv_fw_health_reg(bp);
15074                if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15075                    !bp->fw_reset_min_dsecs) {
15076                        u16 val;
15077
15078                        pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15079                        if (val == 0xffff) {
15080                                if (bnxt_fw_reset_timeout(bp)) {
15081                                        netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15082                                        rc = -ETIMEDOUT;
15083                                        goto fw_reset_abort;
15084                                }
15085                                bnxt_queue_fw_reset_work(bp, HZ / 1000);
15086                                return;
15087                        }
15088                }
15089                clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15090                clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15091                if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15092                    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15093                        bnxt_dl_remote_reload(bp);
15094                if (pci_enable_device(bp->pdev)) {
15095                        netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15096                        rc = -ENODEV;
15097                        goto fw_reset_abort;
15098                }
15099                pci_set_master(bp->pdev);
15100                bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15101                fallthrough;
15102        case BNXT_FW_RESET_STATE_POLL_FW:
15103                bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15104                rc = bnxt_hwrm_poll(bp);
15105                if (rc) {
15106                        if (bnxt_fw_reset_timeout(bp)) {
15107                                netdev_err(bp->dev, "Firmware reset aborted\n");
15108                                goto fw_reset_abort_status;
15109                        }
15110                        bnxt_queue_fw_reset_work(bp, HZ / 5);
15111                        return;
15112                }
15113                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15114                bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15115                fallthrough;
15116        case BNXT_FW_RESET_STATE_OPENING:
15117                while (!netdev_trylock(bp->dev)) {
15118                        bnxt_queue_fw_reset_work(bp, HZ / 10);
15119                        return;
15120                }
15121                rc = bnxt_open(bp->dev);
15122                if (rc) {
15123                        netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15124                        bnxt_fw_reset_abort(bp, rc);
15125                        netdev_unlock(bp->dev);
15126                        goto ulp_start;
15127                }
15128
15129                if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15130                    bp->fw_health->enabled) {
15131                        bp->fw_health->last_fw_reset_cnt =
15132                                bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15133                }
15134                bp->fw_reset_state = 0;
15135                /* Make sure fw_reset_state is 0 before clearing the flag */
15136                smp_mb__before_atomic();
15137                clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15138                bnxt_ptp_reapply_pps(bp);
15139                clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15140                if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15141                        bnxt_dl_health_fw_recovery_done(bp);
15142                        bnxt_dl_health_fw_status_update(bp, true);
15143                }
15144                netdev_unlock(bp->dev);
15145                bnxt_ulp_start(bp, 0);
15146                bnxt_reenable_sriov(bp);
15147                netdev_lock(bp->dev);
15148                bnxt_vf_reps_alloc(bp);
15149                bnxt_vf_reps_open(bp);
15150                netdev_unlock(bp->dev);
15151                break;
15152        }
15153        return;
15154
15155fw_reset_abort_status:
15156        if (bp->fw_health->status_reliable ||
15157            (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15158                u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15159
15160                netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15161        }
15162fw_reset_abort:
15163        netdev_lock(bp->dev);
15164        bnxt_fw_reset_abort(bp, rc);
15165        netdev_unlock(bp->dev);
15166ulp_start:
15167        bnxt_ulp_start(bp, rc);
15168}
15169
15170static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15171{
15172        int rc;
15173        struct bnxt *bp = netdev_priv(dev);
15174
15175        SET_NETDEV_DEV(dev, &pdev->dev);
15176
15177        /* enable device (incl. PCI PM wakeup), and bus-mastering */
15178        rc = pci_enable_device(pdev);
15179        if (rc) {
15180                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15181                goto init_err;
15182        }
15183
15184        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15185                dev_err(&pdev->dev,
15186                        "Cannot find PCI device base address, aborting\n");
15187                rc = -ENODEV;
15188                goto init_err_disable;
15189        }
15190
15191        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15192        if (rc) {
15193                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15194                goto init_err_disable;
15195        }
15196
15197        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15198            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15199                dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15200                rc = -EIO;
15201                goto init_err_release;
15202        }
15203
15204        pci_set_master(pdev);
15205
15206        bp->dev = dev;
15207        bp->pdev = pdev;
15208
15209        /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15210         * determines the BAR size.
15211         */
15212        bp->bar0 = pci_ioremap_bar(pdev, 0);
15213        if (!bp->bar0) {
15214                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15215                rc = -ENOMEM;
15216                goto init_err_release;
15217        }
15218
15219        bp->bar2 = pci_ioremap_bar(pdev, 4);
15220        if (!bp->bar2) {
15221                dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15222                rc = -ENOMEM;
15223                goto init_err_release;
15224        }
15225
15226        INIT_WORK(&bp->sp_task, bnxt_sp_task);
15227        INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15228
15229        spin_lock_init(&bp->ntp_fltr_lock);
15230#if BITS_PER_LONG == 32
15231        spin_lock_init(&bp->db_lock);
15232#endif
15233
15234        bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15235        bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15236
15237        timer_setup(&bp->timer, bnxt_timer, 0);
15238        bp->current_interval = BNXT_TIMER_INTERVAL;
15239
15240        bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15241        bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15242
15243        clear_bit(BNXT_STATE_OPEN, &bp->state);
15244        return 0;
15245
15246init_err_release:
15247        bnxt_unmap_bars(bp, pdev);
15248        pci_release_regions(pdev);
15249
15250init_err_disable:
15251        pci_disable_device(pdev);
15252
15253init_err:
15254        return rc;
15255}
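
/* Generic sketch of the DMA-mask fallback used in bnxt_init_board()
 * (illustrative helper, not called by the driver): prefer 64-bit DMA
 * addressing and fall back to 32-bit before failing the probe.
 */
static int __maybe_unused example_set_dma_mask(struct pci_dev *pdev)
{
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                return 0;       /* device can DMA to any 64-bit address */
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}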
15256
15257static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15258{
15259        struct sockaddr *addr = p;
15260        struct bnxt *bp = netdev_priv(dev);
15261        int rc = 0;
15262
15263        netdev_assert_locked(dev);
15264
15265        if (!is_valid_ether_addr(addr->sa_data))
15266                return -EADDRNOTAVAIL;
15267
15268        if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15269                return 0;
15270
15271        rc = bnxt_approve_mac(bp, addr->sa_data, true);
15272        if (rc)
15273                return rc;
15274
15275        eth_hw_addr_set(dev, addr->sa_data);
15276        bnxt_clear_usr_fltrs(bp, true);
15277        if (netif_running(dev)) {
15278                bnxt_close_nic(bp, false, false);
15279                rc = bnxt_open_nic(bp, false, false);
15280        }
15281
15282        return rc;
15283}
15284
15285static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15286{
15287        struct bnxt *bp = netdev_priv(dev);
15288
15289        netdev_assert_locked(dev);
15290
15291        if (netif_running(dev))
15292                bnxt_close_nic(bp, true, false);
15293
15294        WRITE_ONCE(dev->mtu, new_mtu);
15295
15296        /* MTU change may change the AGG ring settings if an XDP multi-buffer
15297         * program is attached.  We need to set the AGG ring settings and
15298         * rx_skb_func accordingly.
15299         */
15300        if (READ_ONCE(bp->xdp_prog))
15301                bnxt_set_rx_skb_mode(bp, true);
15302
15303        bnxt_set_ring_params(bp);
15304
15305        if (netif_running(dev))
15306                return bnxt_open_nic(bp, true, false);
15307
15308        return 0;
15309}
15310
15311int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15312{
15313        struct bnxt *bp = netdev_priv(dev);
15314        bool sh = false;
15315        int rc, tx_cp;
15316
15317        if (tc > bp->max_tc) {
15318                netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15319                           tc, bp->max_tc);
15320                return -EINVAL;
15321        }
15322
15323        if (bp->num_tc == tc)
15324                return 0;
15325
15326        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15327                sh = true;
15328
15329        rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15330                              sh, tc, bp->tx_nr_rings_xdp);
15331        if (rc)
15332                return rc;
15333
15334        /* Need to close the device and redo HW resource allocations */
15335        if (netif_running(bp->dev))
15336                bnxt_close_nic(bp, true, false);
15337
15338        if (tc) {
15339                bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15340                netdev_set_num_tc(dev, tc);
15341                bp->num_tc = tc;
15342        } else {
15343                bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15344                netdev_reset_tc(dev);
15345                bp->num_tc = 0;
15346        }
15347        bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15348        tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15349        bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15350                               tx_cp + bp->rx_nr_rings;
15351
15352        if (netif_running(bp->dev))
15353                return bnxt_open_nic(bp, true, false);
15354
15355        return 0;
15356}
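
/* Worked example with illustrative numbers: with tx_nr_rings_per_tc = 4,
 * tx_nr_rings_xdp = 0 and shared rings, a request such as
 * "tc qdisc add dev <iface> root mqprio num_tc 2 ... hw 1" arrives here
 * with tc = 2 and leaves bp->tx_nr_rings = 4 * 2 = 8; bp->cp_nr_rings
 * then becomes max(tx_cp, rx_nr_rings), with tx_cp accounting for TX
 * rings that share a completion ring.
 */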
15357
15358static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15359                                  void *cb_priv)
15360{
15361        struct bnxt *bp = cb_priv;
15362
15363        if (!bnxt_tc_flower_enabled(bp) ||
15364            !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15365                return -EOPNOTSUPP;
15366
15367        switch (type) {
15368        case TC_SETUP_CLSFLOWER:
15369                return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15370        default:
15371                return -EOPNOTSUPP;
15372        }
15373}
15374
15375LIST_HEAD(bnxt_block_cb_list);
15376
15377static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15378                         void *type_data)
15379{
15380        struct bnxt *bp = netdev_priv(dev);
15381
15382        switch (type) {
15383        case TC_SETUP_BLOCK:
15384                return flow_block_cb_setup_simple(type_data,
15385                                                  &bnxt_block_cb_list,
15386                                                  bnxt_setup_tc_block_cb,
15387                                                  bp, bp, true);
15388        case TC_SETUP_QDISC_MQPRIO: {
15389                struct tc_mqprio_qopt *mqprio = type_data;
15390
15391                mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15392
15393                return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15394        }
15395        default:
15396                return -EOPNOTSUPP;
15397        }
15398}
15399
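/* Illustrative userspace trigger (assumed example, not part of the
 * driver): an offloaded mqprio qdisc such as
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 hw 1
 *
 * reaches this handler as TC_SETUP_QDISC_MQPRIO with mqprio->num_tc = 4,
 * while flower rules bound to a tc block are dispatched through
 * bnxt_setup_tc_block_cb() as TC_SETUP_CLSFLOWER.
 */
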
15400u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15401                            const struct sk_buff *skb)
15402{
15403        struct bnxt_vnic_info *vnic;
15404
15405        if (skb)
15406                return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15407
15408        vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15409        return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15410}
15411
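/* The two hash sources are deliberate: the aRFS path passes an skb and
 * reuses the RX hash the NIC already computed, while user-added ntuple
 * filters pass skb == NULL and hash the flow keys in software with the
 * default VNIC's Toeplitz RSS key, so both filter types can be looked up
 * through the same hash table.
 */
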
15412int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15413                           u32 idx)
15414{
15415        struct hlist_head *head;
15416        int bit_id;
15417
15418        spin_lock_bh(&bp->ntp_fltr_lock);
15419        bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15420        if (bit_id < 0) {
15421                spin_unlock_bh(&bp->ntp_fltr_lock);
15422                return -ENOMEM;
15423        }
15424
15425        fltr->base.sw_id = (u16)bit_id;
15426        fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15427        fltr->base.flags |= BNXT_ACT_RING_DST;
15428        head = &bp->ntp_fltr_hash_tbl[idx];
15429        hlist_add_head_rcu(&fltr->base.hash, head);
15430        set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15431        bnxt_insert_usr_fltr(bp, &fltr->base);
15432        bp->ntp_fltr_count++;
15433        spin_unlock_bh(&bp->ntp_fltr_lock);
15434        return 0;
15435}
15436
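/* A minimal sketch of the sw_id allocation pattern used above
 * (hypothetical bitmap size; bitmap_find_free_region() with order 0
 * claims a single bit and returns its index):
 *
 *	DECLARE_BITMAP(map, 64);
 *	int id = bitmap_find_free_region(map, 64, 0);
 *	if (id < 0)
 *		return -ENOMEM;		// table full
 *	...
 *	bitmap_release_region(map, id, 0);
 *
 * bnxt_del_ntp_filter() releases the bit with clear_bit() instead, which
 * is equivalent for an order-0 region.
 */
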
15437static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15438                            struct bnxt_ntuple_filter *f2)
15439{
15440        struct bnxt_flow_masks *masks1 = &f1->fmasks;
15441        struct bnxt_flow_masks *masks2 = &f2->fmasks;
15442        struct flow_keys *keys1 = &f1->fkeys;
15443        struct flow_keys *keys2 = &f2->fkeys;
15444
15445        if (keys1->basic.n_proto != keys2->basic.n_proto ||
15446            keys1->basic.ip_proto != keys2->basic.ip_proto)
15447                return false;
15448
15449        if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15450                if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15451                    masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15452                    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15453                    masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15454                        return false;
15455        } else {
15456                if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15457                                     &keys2->addrs.v6addrs.src) ||
15458                    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15459                                     &masks2->addrs.v6addrs.src) ||
15460                    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15461                                     &keys2->addrs.v6addrs.dst) ||
15462                    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15463                                     &masks2->addrs.v6addrs.dst))
15464                        return false;
15465        }
15466
15467        return keys1->ports.src == keys2->ports.src &&
15468               masks1->ports.src == masks2->ports.src &&
15469               keys1->ports.dst == keys2->ports.dst &&
15470               masks1->ports.dst == masks2->ports.dst &&
15471               keys1->control.flags == keys2->control.flags &&
15472               f1->l2_fltr == f2->l2_fltr;
15473}
15474
15475struct bnxt_ntuple_filter *
15476bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15477                                struct bnxt_ntuple_filter *fltr, u32 idx)
15478{
15479        struct bnxt_ntuple_filter *f;
15480        struct hlist_head *head;
15481
15482        head = &bp->ntp_fltr_hash_tbl[idx];
15483        hlist_for_each_entry_rcu(f, head, base.hash) {
15484                if (bnxt_fltr_match(f, fltr))
15485                        return f;
15486        }
15487        return NULL;
15488}
15489
15490#ifdef CONFIG_RFS_ACCEL
15491static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15492                              u16 rxq_index, u32 flow_id)
15493{
15494        struct bnxt *bp = netdev_priv(dev);
15495        struct bnxt_ntuple_filter *fltr, *new_fltr;
15496        struct flow_keys *fkeys;
15497        struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15498        struct bnxt_l2_filter *l2_fltr;
15499        int rc = 0, idx;
15500        u32 flags;
15501
15502        if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15503                l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15504                atomic_inc(&l2_fltr->refcnt);
15505        } else {
15506                struct bnxt_l2_key key;
15507
15508                ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15509                key.vlan = 0;
15510                l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15511                if (!l2_fltr)
15512                        return -EINVAL;
15513                if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15514                        bnxt_del_l2_filter(bp, l2_fltr);
15515                        return -EINVAL;
15516                }
15517        }
15518        new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15519        if (!new_fltr) {
15520                bnxt_del_l2_filter(bp, l2_fltr);
15521                return -ENOMEM;
15522        }
15523
15524        fkeys = &new_fltr->fkeys;
15525        if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15526                rc = -EPROTONOSUPPORT;
15527                goto err_free;
15528        }
15529
15530        if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15531             fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15532            ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15533             (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15534                rc = -EPROTONOSUPPORT;
15535                goto err_free;
15536        }
15537        new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15538        if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15539                if (bp->hwrm_spec_code < 0x10601) {
15540                        rc = -EPROTONOSUPPORT;
15541                        goto err_free;
15542                }
15543                new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15544        }
15545        flags = fkeys->control.flags;
15546        if (((flags & FLOW_DIS_ENCAPSULATION) &&
15547             bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15548                rc = -EPROTONOSUPPORT;
15549                goto err_free;
15550        }
15551        new_fltr->l2_fltr = l2_fltr;
15552
15553        idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15554        rcu_read_lock();
15555        fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15556        if (fltr) {
15557                rc = fltr->base.sw_id;
15558                rcu_read_unlock();
15559                goto err_free;
15560        }
15561        rcu_read_unlock();
15562
15563        new_fltr->flow_id = flow_id;
15564        new_fltr->base.rxq = rxq_index;
15565        rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15566        if (!rc) {
15567                bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15568                return new_fltr->base.sw_id;
15569        }
15570
15571err_free:
15572        bnxt_del_l2_filter(bp, l2_fltr);
15573        kfree(new_fltr);
15574        return rc;
15575}
15576#endif
15577
15578void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15579{
15580        spin_lock_bh(&bp->ntp_fltr_lock);
15581        if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15582                spin_unlock_bh(&bp->ntp_fltr_lock);
15583                return;
15584        }
15585        hlist_del_rcu(&fltr->base.hash);
15586        bnxt_del_one_usr_fltr(bp, &fltr->base);
15587        bp->ntp_fltr_count--;
15588        spin_unlock_bh(&bp->ntp_fltr_lock);
15589        bnxt_del_l2_filter(bp, fltr->l2_fltr);
15590        clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15591        kfree_rcu(fltr, base.rcu);
15592}
15593
15594static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15595{
15596#ifdef CONFIG_RFS_ACCEL
15597        int i;
15598
15599        for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15600                struct hlist_head *head;
15601                struct hlist_node *tmp;
15602                struct bnxt_ntuple_filter *fltr;
15603                int rc;
15604
15605                head = &bp->ntp_fltr_hash_tbl[i];
15606                hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15607                        bool del = false;
15608
15609                        if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15610                                if (fltr->base.flags & BNXT_ACT_NO_AGING)
15611                                        continue;
15612                                if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15613                                                        fltr->flow_id,
15614                                                        fltr->base.sw_id)) {
15615                                        bnxt_hwrm_cfa_ntuple_filter_free(bp,
15616                                                                         fltr);
15617                                        del = true;
15618                                }
15619                        } else {
15620                                rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15621                                                                       fltr);
15622                                if (rc)
15623                                        del = true;
15624                                else
15625                                        set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15626                        }
15627
15628                        if (del)
15629                                bnxt_del_ntp_filter(bp, fltr);
15630                }
15631        }
15632#endif
15633}
15634
15635static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15636                                    unsigned int entry, struct udp_tunnel_info *ti)
15637{
15638        struct bnxt *bp = netdev_priv(netdev);
15639        unsigned int cmd;
15640
15641        if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15642                cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15643        else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15644                cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15645        else
15646                cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15647
15648        return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15649}
15650
15651static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15652                                      unsigned int entry, struct udp_tunnel_info *ti)
15653{
15654        struct bnxt *bp = netdev_priv(netdev);
15655        unsigned int cmd;
15656
15657        if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15658                cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15659        else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15660                cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15661        else
15662                cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15663
15664        return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15665}
15666
15667static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15668        .set_port       = bnxt_udp_tunnel_set_port,
15669        .unset_port     = bnxt_udp_tunnel_unset_port,
15670        .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15671        .tables         = {
15672                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15673                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15674        },
15675}, bnxt_udp_tunnels_p7 = {
15676        .set_port       = bnxt_udp_tunnel_set_port,
15677        .unset_port     = bnxt_udp_tunnel_unset_port,
15678        .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15679        .tables         = {
15680                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15681                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15682                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15683        },
15684};
15685
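/* Descriptive note on the tables above: UDP_TUNNEL_NIC_INFO_OPEN_ONLY
 * tells the udp_tunnel core to program ports only while the device is up,
 * and each table holds a single entry, so a second port of the same
 * tunnel type is refused by the core before the driver is called.  The P7
 * variant simply adds a one-entry table for VXLAN-GPE.
 */
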
15686static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15687                               struct net_device *dev, u32 filter_mask,
15688                               int nlflags)
15689{
15690        struct bnxt *bp = netdev_priv(dev);
15691
15692        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15693                                       nlflags, filter_mask, NULL);
15694}
15695
15696static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15697                               u16 flags, struct netlink_ext_ack *extack)
15698{
15699        struct bnxt *bp = netdev_priv(dev);
15700        struct nlattr *attr, *br_spec;
15701        int rem, rc = 0;
15702
15703        if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15704                return -EOPNOTSUPP;
15705
15706        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15707        if (!br_spec)
15708                return -EINVAL;
15709
15710        nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15711                u16 mode;
15712
15713                mode = nla_get_u16(attr);
15714                if (mode == bp->br_mode)
15715                        break;
15716
15717                rc = bnxt_hwrm_set_br_mode(bp, mode);
15718                if (!rc)
15719                        bp->br_mode = mode;
15720                break;
15721        }
15722        return rc;
15723}
15724
15725int bnxt_get_port_parent_id(struct net_device *dev,
15726                            struct netdev_phys_item_id *ppid)
15727{
15728        struct bnxt *bp = netdev_priv(dev);
15729
15730        if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15731                return -EOPNOTSUPP;
15732
15733        /* The PF and its VF-reps only support the switchdev framework */
15734        if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15735                return -EOPNOTSUPP;
15736
15737        ppid->id_len = sizeof(bp->dsn);
15738        memcpy(ppid->id, bp->dsn, ppid->id_len);
15739
15740        return 0;
15741}
15742
15743static const struct net_device_ops bnxt_netdev_ops = {
15744        .ndo_open               = bnxt_open,
15745        .ndo_start_xmit         = bnxt_start_xmit,
15746        .ndo_stop               = bnxt_close,
15747        .ndo_get_stats64        = bnxt_get_stats64,
15748        .ndo_set_rx_mode        = bnxt_set_rx_mode,
15749        .ndo_eth_ioctl          = bnxt_ioctl,
15750        .ndo_validate_addr      = eth_validate_addr,
15751        .ndo_set_mac_address    = bnxt_change_mac_addr,
15752        .ndo_change_mtu         = bnxt_change_mtu,
15753        .ndo_fix_features       = bnxt_fix_features,
15754        .ndo_set_features       = bnxt_set_features,
15755        .ndo_features_check     = bnxt_features_check,
15756        .ndo_tx_timeout         = bnxt_tx_timeout,
15757#ifdef CONFIG_BNXT_SRIOV
15758        .ndo_get_vf_config      = bnxt_get_vf_config,
15759        .ndo_set_vf_mac         = bnxt_set_vf_mac,
15760        .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
15761        .ndo_set_vf_rate        = bnxt_set_vf_bw,
15762        .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
15763        .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
15764        .ndo_set_vf_trust       = bnxt_set_vf_trust,
15765#endif
15766        .ndo_setup_tc           = bnxt_setup_tc,
15767#ifdef CONFIG_RFS_ACCEL
15768        .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
15769#endif
15770        .ndo_bpf                = bnxt_xdp,
15771        .ndo_xdp_xmit           = bnxt_xdp_xmit,
15772        .ndo_bridge_getlink     = bnxt_bridge_getlink,
15773        .ndo_bridge_setlink     = bnxt_bridge_setlink,
15774};
15775
15776static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15777                                    struct netdev_queue_stats_rx *stats)
15778{
15779        struct bnxt *bp = netdev_priv(dev);
15780        struct bnxt_cp_ring_info *cpr;
15781        u64 *sw;
15782
15783        if (!bp->bnapi)
15784                return;
15785
15786        cpr = &bp->bnapi[i]->cp_ring;
15787        sw = cpr->stats.sw_stats;
15788
15789        stats->packets = 0;
15790        stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15791        stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15792        stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15793
15794        stats->bytes = 0;
15795        stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15796        stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15797        stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15798
15799        stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15800}
15801
15802static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15803                                    struct netdev_queue_stats_tx *stats)
15804{
15805        struct bnxt *bp = netdev_priv(dev);
15806        struct bnxt_napi *bnapi;
15807        u64 *sw;
15808
15809        if (!bp->tx_ring)
15810                return;
15811
15812        bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15813        sw = bnapi->cp_ring.stats.sw_stats;
15814
15815        stats->packets = 0;
15816        stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15817        stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15818        stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15819
15820        stats->bytes = 0;
15821        stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15822        stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15823        stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15824}
15825
15826static void bnxt_get_base_stats(struct net_device *dev,
15827                                struct netdev_queue_stats_rx *rx,
15828                                struct netdev_queue_stats_tx *tx)
15829{
15830        struct bnxt *bp = netdev_priv(dev);
15831
15832        rx->packets = bp->net_stats_prev.rx_packets;
15833        rx->bytes = bp->net_stats_prev.rx_bytes;
15834        rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15835
15836        tx->packets = bp->net_stats_prev.tx_packets;
15837        tx->bytes = bp->net_stats_prev.tx_bytes;
15838}
15839
15840static const struct netdev_stat_ops bnxt_stat_ops = {
15841        .get_queue_stats_rx     = bnxt_get_queue_stats_rx,
15842        .get_queue_stats_tx     = bnxt_get_queue_stats_tx,
15843        .get_base_stats         = bnxt_get_base_stats,
15844};
15845
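/* The per-queue stats above feed the netlink queue-stats API.  The core
 * treats get_base_stats() as the delta that makes base + sum(queues)
 * equal the device total, which is why it returns the ..._prev counters
 * saved across ifdown/ring reconfiguration.  Rough sketch of the
 * consumer-side invariant (not driver code):
 *
 *	total_rx_packets == base.packets + sum_i(queue_rx[i].packets)
 */
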
15846static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
15847{
15848        struct bnxt_rx_ring_info *rxr, *clone;
15849        struct bnxt *bp = netdev_priv(dev);
15850        struct bnxt_ring_struct *ring;
15851        int rc;
15852
15853        if (!bp->rx_ring)
15854                return -ENETDOWN;
15855
15856        rxr = &bp->rx_ring[idx];
15857        clone = qmem;
15858        memcpy(clone, rxr, sizeof(*rxr));
15859        bnxt_init_rx_ring_struct(bp, clone);
15860        bnxt_reset_rx_ring_struct(bp, clone);
15861
15862        clone->rx_prod = 0;
15863        clone->rx_agg_prod = 0;
15864        clone->rx_sw_agg_prod = 0;
15865        clone->rx_next_cons = 0;
15866        clone->need_head_pool = false;
15867
15868        rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
15869        if (rc)
15870                return rc;
15871
15872        rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
15873        if (rc < 0)
15874                goto err_page_pool_destroy;
15875
15876        rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
15877                                        MEM_TYPE_PAGE_POOL,
15878                                        clone->page_pool);
15879        if (rc)
15880                goto err_rxq_info_unreg;
15881
15882        ring = &clone->rx_ring_struct;
15883        rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15884        if (rc)
15885                goto err_free_rx_ring;
15886
15887        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
15888                ring = &clone->rx_agg_ring_struct;
15889                rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15890                if (rc)
15891                        goto err_free_rx_agg_ring;
15892
15893                rc = bnxt_alloc_rx_agg_bmap(bp, clone);
15894                if (rc)
15895                        goto err_free_rx_agg_ring;
15896        }
15897
15898        if (bp->flags & BNXT_FLAG_TPA) {
15899                rc = bnxt_alloc_one_tpa_info(bp, clone);
15900                if (rc)
15901                        goto err_free_tpa_info;
15902        }
15903
15904        bnxt_init_one_rx_ring_rxbd(bp, clone);
15905        bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
15906
15907        bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
15908        if (bp->flags & BNXT_FLAG_AGG_RINGS)
15909                bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
15910        if (bp->flags & BNXT_FLAG_TPA)
15911                bnxt_alloc_one_tpa_info_data(bp, clone);
15912
15913        return 0;
15914
15915err_free_tpa_info:
15916        bnxt_free_one_tpa_info(bp, clone);
15917err_free_rx_agg_ring:
15918        bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
15919err_free_rx_ring:
15920        bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
15921err_rxq_info_unreg:
15922        xdp_rxq_info_unreg(&clone->xdp_rxq);
15923err_page_pool_destroy:
15924        page_pool_destroy(clone->page_pool);
15925        if (bnxt_separate_head_pool(clone))
15926                page_pool_destroy(clone->head_pool);
15927        clone->page_pool = NULL;
15928        clone->head_pool = NULL;
15929        return rc;
15930}
15931
15932static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
15933{
15934        struct bnxt_rx_ring_info *rxr = qmem;
15935        struct bnxt *bp = netdev_priv(dev);
15936        struct bnxt_ring_struct *ring;
15937
15938        bnxt_free_one_rx_ring_skbs(bp, rxr);
15939        bnxt_free_one_tpa_info(bp, rxr);
15940
15941        xdp_rxq_info_unreg(&rxr->xdp_rxq);
15942
15943        page_pool_destroy(rxr->page_pool);
15944        if (bnxt_separate_head_pool(rxr))
15945                page_pool_destroy(rxr->head_pool);
15946        rxr->page_pool = NULL;
15947        rxr->head_pool = NULL;
15948
15949        ring = &rxr->rx_ring_struct;
15950        bnxt_free_ring(bp, &ring->ring_mem);
15951
15952        ring = &rxr->rx_agg_ring_struct;
15953        bnxt_free_ring(bp, &ring->ring_mem);
15954
15955        kfree(rxr->rx_agg_bmap);
15956        rxr->rx_agg_bmap = NULL;
15957}
15958
15959static void bnxt_copy_rx_ring(struct bnxt *bp,
15960                              struct bnxt_rx_ring_info *dst,
15961                              struct bnxt_rx_ring_info *src)
15962{
15963        struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
15964        struct bnxt_ring_struct *dst_ring, *src_ring;
15965        int i;
15966
15967        dst_ring = &dst->rx_ring_struct;
15968        dst_rmem = &dst_ring->ring_mem;
15969        src_ring = &src->rx_ring_struct;
15970        src_rmem = &src_ring->ring_mem;
15971
15972        WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15973        WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15974        WARN_ON(dst_rmem->flags != src_rmem->flags);
15975        WARN_ON(dst_rmem->depth != src_rmem->depth);
15976        WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
15977        WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
15978
15979        dst_rmem->pg_tbl = src_rmem->pg_tbl;
15980        dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
15981        *dst_rmem->vmem = *src_rmem->vmem;
15982        for (i = 0; i < dst_rmem->nr_pages; i++) {
15983                dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
15984                dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
15985        }
15986
15987        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
15988                return;
15989
15990        dst_ring = &dst->rx_agg_ring_struct;
15991        dst_rmem = &dst_ring->ring_mem;
15992        src_ring = &src->rx_agg_ring_struct;
15993        src_rmem = &src_ring->ring_mem;
15994
15995        WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15996        WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15997        WARN_ON(dst_rmem->flags != src_rmem->flags);
15998        WARN_ON(dst_rmem->depth != src_rmem->depth);
15999        WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16000        WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16001        WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16002
16003        dst_rmem->pg_tbl = src_rmem->pg_tbl;
16004        dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16005        *dst_rmem->vmem = *src_rmem->vmem;
16006        for (i = 0; i < dst_rmem->nr_pages; i++) {
16007                dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16008                dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16009        }
16010
16011        dst->rx_agg_bmap = src->rx_agg_bmap;
16012}
16013
16014static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
16015{
16016        struct bnxt *bp = netdev_priv(dev);
16017        struct bnxt_rx_ring_info *rxr, *clone;
16018        struct bnxt_cp_ring_info *cpr;
16019        struct bnxt_vnic_info *vnic;
16020        struct bnxt_napi *bnapi;
16021        int i, rc;
16022        u16 mru;
16023
16024        rxr = &bp->rx_ring[idx];
16025        clone = qmem;
16026
16027        rxr->rx_prod = clone->rx_prod;
16028        rxr->rx_agg_prod = clone->rx_agg_prod;
16029        rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16030        rxr->rx_next_cons = clone->rx_next_cons;
16031        rxr->rx_tpa = clone->rx_tpa;
16032        rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16033        rxr->page_pool = clone->page_pool;
16034        rxr->head_pool = clone->head_pool;
16035        rxr->xdp_rxq = clone->xdp_rxq;
16036        rxr->need_head_pool = clone->need_head_pool;
16037
16038        bnxt_copy_rx_ring(bp, rxr, clone);
16039
16040        bnapi = rxr->bnapi;
16041        cpr = &bnapi->cp_ring;
16042
16043        /* All rings have been reserved and previously allocated.
16044         * Reallocating with the same parameters should never fail.
16045         */
16046        rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16047        if (rc)
16048                goto err_reset;
16049
16050        if (bp->tph_mode) {
16051                rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16052                if (rc)
16053                        goto err_reset;
16054        }
16055
16056        rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16057        if (rc)
16058                goto err_reset;
16059
16060        bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16061        if (bp->flags & BNXT_FLAG_AGG_RINGS)
16062                bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16063
16064        if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16065                rc = bnxt_tx_queue_start(bp, idx);
16066                if (rc)
16067                        goto err_reset;
16068        }
16069
16070        bnxt_enable_rx_page_pool(rxr);
16071        napi_enable_locked(&bnapi->napi);
16072        bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16073
16074        mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
16075        for (i = 0; i < bp->nr_vnics; i++) {
16076                vnic = &bp->vnic_info[i];
16077
16078                rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16079                if (rc)
16080                        return rc;
16081        }
16082        return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16083
16084err_reset:
16085        netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16086                   rc);
16087        napi_enable_locked(&bnapi->napi);
16088        bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16089        bnxt_reset_task(bp, true);
16090        return rc;
16091}
16092
16093static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16094{
16095        struct bnxt *bp = netdev_priv(dev);
16096        struct bnxt_rx_ring_info *rxr;
16097        struct bnxt_cp_ring_info *cpr;
16098        struct bnxt_vnic_info *vnic;
16099        struct bnxt_napi *bnapi;
16100        int i;
16101
16102        for (i = 0; i < bp->nr_vnics; i++) {
16103                vnic = &bp->vnic_info[i];
16104
16105                bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16106        }
16107        bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16108        /* Make sure NAPI sees that the VNIC is disabled */
16109        synchronize_net();
16110        rxr = &bp->rx_ring[idx];
16111        bnapi = rxr->bnapi;
16112        cpr = &bnapi->cp_ring;
16113        cancel_work_sync(&cpr->dim.work);
16114        bnxt_hwrm_rx_ring_free(bp, rxr, false);
16115        bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16116        page_pool_disable_direct_recycling(rxr->page_pool);
16117        if (bnxt_separate_head_pool(rxr))
16118                page_pool_disable_direct_recycling(rxr->head_pool);
16119
16120        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16121                bnxt_tx_queue_stop(bp, idx);
16122
16123        /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16124         * completion is handled in NAPI to guarantee no more DMA on that ring
16125         * after seeing the completion.
16126         */
16127        napi_disable_locked(&bnapi->napi);
16128
16129        if (bp->tph_mode) {
16130                bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16131                bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16132        }
16133        bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16134
16135        memcpy(qmem, rxr, sizeof(*rxr));
16136        bnxt_init_rx_ring_struct(bp, qmem);
16137
16138        return 0;
16139}
16140
16141static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16142        .ndo_queue_mem_size     = sizeof(struct bnxt_rx_ring_info),
16143        .ndo_queue_mem_alloc    = bnxt_queue_mem_alloc,
16144        .ndo_queue_mem_free     = bnxt_queue_mem_free,
16145        .ndo_queue_start        = bnxt_queue_start,
16146        .ndo_queue_stop         = bnxt_queue_stop,
16147};
16148
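/* Rough sketch of how the core drives these callbacks for a single-queue
 * restart (order approximate; see netdev_rx_queue_restart()):
 *
 *	ndo_queue_mem_alloc(dev, new, idx);	// prefill rings off to the side
 *	ndo_queue_stop(dev, old, idx);		// live state saved into 'old'
 *	ndo_queue_start(dev, new, idx);		// swap the prefilled state in
 *	ndo_queue_mem_free(dev, old);		// retire the old rings
 *
 * This is why bnxt_queue_mem_alloc() fills a clone and bnxt_queue_stop()
 * copies the live ring back out into qmem.
 */
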
16149static void bnxt_remove_one(struct pci_dev *pdev)
16150{
16151        struct net_device *dev = pci_get_drvdata(pdev);
16152        struct bnxt *bp = netdev_priv(dev);
16153
16154        if (BNXT_PF(bp))
16155                bnxt_sriov_disable(bp);
16156
16157        bnxt_rdma_aux_device_del(bp);
16158
16159        unregister_netdev(dev);
16160        bnxt_ptp_clear(bp);
16161
16162        bnxt_rdma_aux_device_uninit(bp);
16163
16164        bnxt_free_l2_filters(bp, true);
16165        bnxt_free_ntp_fltrs(bp, true);
16166        WARN_ON(bp->num_rss_ctx);
16167        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16168        /* Flush any pending tasks */
16169        cancel_work_sync(&bp->sp_task);
16170        cancel_delayed_work_sync(&bp->fw_reset_task);
16171        bp->sp_event = 0;
16172
16173        bnxt_dl_fw_reporters_destroy(bp);
16174        bnxt_dl_unregister(bp);
16175        bnxt_shutdown_tc(bp);
16176
16177        bnxt_clear_int_mode(bp);
16178        bnxt_hwrm_func_drv_unrgtr(bp);
16179        bnxt_free_hwrm_resources(bp);
16180        bnxt_hwmon_uninit(bp);
16181        bnxt_ethtool_free(bp);
16182        bnxt_dcb_free(bp);
16183        kfree(bp->ptp_cfg);
16184        bp->ptp_cfg = NULL;
16185        kfree(bp->fw_health);
16186        bp->fw_health = NULL;
16187        bnxt_cleanup_pci(bp);
16188        bnxt_free_ctx_mem(bp, true);
16189        bnxt_free_crash_dump_mem(bp);
16190        kfree(bp->rss_indir_tbl);
16191        bp->rss_indir_tbl = NULL;
16192        bnxt_free_port_stats(bp);
16193        free_netdev(dev);
16194}
16195
16196static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16197{
16198        int rc = 0;
16199        struct bnxt_link_info *link_info = &bp->link_info;
16200
16201        bp->phy_flags = 0;
16202        rc = bnxt_hwrm_phy_qcaps(bp);
16203        if (rc) {
16204                netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16205                           rc);
16206                return rc;
16207        }
16208        if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16209                bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16210        else
16211                bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16212
16213        bp->mac_flags = 0;
16214        bnxt_hwrm_mac_qcaps(bp);
16215
16216        if (!fw_dflt)
16217                return 0;
16218
16219        mutex_lock(&bp->link_lock);
16220        rc = bnxt_update_link(bp, false);
16221        if (rc) {
16222                mutex_unlock(&bp->link_lock);
16223                netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16224                           rc);
16225                return rc;
16226        }
16227
16228        /* Older firmware does not have supported_auto_speeds, so assume
16229         * that all supported speeds can be autonegotiated.
16230         */
16231        if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16232                link_info->support_auto_speeds = link_info->support_speeds;
16233
16234        bnxt_init_ethtool_link_settings(bp);
16235        mutex_unlock(&bp->link_lock);
16236        return 0;
16237}
16238
16239static int bnxt_get_max_irq(struct pci_dev *pdev)
16240{
16241        u16 ctrl;
16242
16243        if (!pdev->msix_cap)
16244                return 1;
16245
16246        pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16247        return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16248}
16249
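/* PCI_MSIX_FLAGS_QSIZE encodes the MSI-X table size minus one, hence the
 * +1 above.  E.g. a (hypothetical) ctrl word with QSIZE bits 0x003f
 * decodes to 64 MSI-X vectors.
 */
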
16250static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16251                                int *max_cp)
16252{
16253        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16254        int max_ring_grps = 0, max_irq;
16255
16256        *max_tx = hw_resc->max_tx_rings;
16257        *max_rx = hw_resc->max_rx_rings;
16258        *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16259        max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16260                        bnxt_get_ulp_msix_num_in_use(bp),
16261                        hw_resc->max_stat_ctxs -
16262                        bnxt_get_ulp_stat_ctxs_in_use(bp));
16263        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16264                *max_cp = min_t(int, *max_cp, max_irq);
16265        max_ring_grps = hw_resc->max_hw_ring_grps;
16266        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16267                *max_cp -= 1;
16268                *max_rx -= 2;
16269        }
16270        if (bp->flags & BNXT_FLAG_AGG_RINGS)
16271                *max_rx >>= 1;
16272        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16273                int rc;
16274
16275                rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16276                if (rc) {
16277                        *max_rx = 0;
16278                        *max_tx = 0;
16279                }
16280                /* On P5_PLUS chips, the max_cp output param is the available NQ count */
16281                *max_cp = max_irq;
16282        }
16283        *max_rx = min_t(int, *max_rx, max_ring_grps);
16284}
16285
16286int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16287{
16288        int rx, tx, cp;
16289
16290        _bnxt_get_max_rings(bp, &rx, &tx, &cp);
16291        *max_rx = rx;
16292        *max_tx = tx;
16293        if (!rx || !tx || !cp)
16294                return -ENOMEM;
16295
16296        return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16297}
16298
16299static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16300                               bool shared)
16301{
16302        int rc;
16303
16304        rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16305        if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16306                /* Not enough rings, try disabling agg rings. */
16307                bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16308                rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16309                if (rc) {
16310                        /* set BNXT_FLAG_AGG_RINGS back for consistency */
16311                        bp->flags |= BNXT_FLAG_AGG_RINGS;
16312                        return rc;
16313                }
16314                bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16315                bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16316                bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16317                bnxt_set_ring_params(bp);
16318        }
16319
16320        if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16321                int max_cp, max_stat, max_irq;
16322
16323                /* Reserve minimum resources for RoCE */
16324                max_cp = bnxt_get_max_func_cp_rings(bp);
16325                max_stat = bnxt_get_max_func_stat_ctxs(bp);
16326                max_irq = bnxt_get_max_func_irqs(bp);
16327                if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16328                    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16329                    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16330                        return 0;
16331
16332                max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16333                max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16334                max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16335                max_cp = min_t(int, max_cp, max_irq);
16336                max_cp = min_t(int, max_cp, max_stat);
16337                rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16338                if (rc)
16339                        rc = 0;
16340        }
16341        return rc;
16342}
16343
16344/* In the initial default shared ring setting, each shared ring must have an
16345 * RX/TX ring pair.
16346 */
16347static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16348{
16349        bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16350        bp->rx_nr_rings = bp->cp_nr_rings;
16351        bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16352        bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16353}
16354
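/* Worked example for the trim above (hypothetical numbers): with
 * rx_nr_rings = 8 and tx_nr_rings_per_tc = 4, cp_nr_rings becomes
 * min(4, 8) = 4 and both rx_nr_rings and tx_nr_rings_per_tc are pulled
 * down to 4, so every shared completion ring owns exactly one RX/TX pair
 * before bnxt_tx_nr_rings() recomputes the total TX ring count.
 */
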
16355static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16356{
16357        int dflt_rings, max_rx_rings, max_tx_rings, rc;
16358        int avail_msix;
16359
16360        if (!bnxt_can_reserve_rings(bp))
16361                return 0;
16362
16363        if (sh)
16364                bp->flags |= BNXT_FLAG_SHARED_RINGS;
16365        dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16366        /* Reduce default rings on multi-port cards so that total default
16367         * rings do not exceed CPU count.
16368         */
16369        if (bp->port_count > 1) {
16370                int max_rings =
16371                        max_t(int, num_online_cpus() / bp->port_count, 1);
16372
16373                dflt_rings = min_t(int, dflt_rings, max_rings);
16374        }
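        /* e.g. (illustrative) 16 online CPUs on a 2-port NIC cap
         * dflt_rings at max_t(int, 16 / 2, 1) = 8 rings per port.
         */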
16375        rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16376        if (rc)
16377                return rc;
16378        bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16379        bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16380        if (sh)
16381                bnxt_trim_dflt_sh_rings(bp);
16382        else
16383                bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16384        bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16385
16386        avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16387        if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16388                int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16389
16390                bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16391                bnxt_set_dflt_ulp_stat_ctxs(bp);
16392        }
16393
16394        rc = __bnxt_reserve_rings(bp);
16395        if (rc && rc != -ENODEV)
16396                netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16397        bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16398        if (sh)
16399                bnxt_trim_dflt_sh_rings(bp);
16400
16401        /* Rings may have been trimmed, re-reserve the trimmed rings. */
16402        if (bnxt_need_reserve_rings(bp)) {
16403                rc = __bnxt_reserve_rings(bp);
16404                if (rc && rc != -ENODEV)
16405                        netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16406                bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16407        }
16408        if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16409                bp->rx_nr_rings++;
16410                bp->cp_nr_rings++;
16411        }
16412        if (rc) {
16413                bp->tx_nr_rings = 0;
16414                bp->rx_nr_rings = 0;
16415        }
16416        return rc;
16417}
16418
16419static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16420{
16421        int rc;
16422
16423        if (bp->tx_nr_rings)
16424                return 0;
16425
16426        bnxt_ulp_irq_stop(bp);
16427        bnxt_clear_int_mode(bp);
16428        rc = bnxt_set_dflt_rings(bp, true);
16429        if (rc) {
16430                if (BNXT_VF(bp) && rc == -ENODEV)
16431                        netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16432                else
16433                        netdev_err(bp->dev, "Not enough rings available.\n");
16434                goto init_dflt_ring_err;
16435        }
16436        rc = bnxt_init_int_mode(bp);
16437        if (rc)
16438                goto init_dflt_ring_err;
16439
16440        bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16441
16442        bnxt_set_dflt_rfs(bp);
16443
16444init_dflt_ring_err:
16445        bnxt_ulp_irq_restart(bp, rc);
16446        return rc;
16447}
16448
16449int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16450{
16451        int rc;
16452
16453        netdev_ops_assert_locked(bp->dev);
16454        bnxt_hwrm_func_qcaps(bp);
16455
16456        if (netif_running(bp->dev))
16457                __bnxt_close_nic(bp, true, false);
16458
16459        bnxt_ulp_irq_stop(bp);
16460        bnxt_clear_int_mode(bp);
16461        rc = bnxt_init_int_mode(bp);
16462        bnxt_ulp_irq_restart(bp, rc);
16463
16464        if (netif_running(bp->dev)) {
16465                if (rc)
16466                        netif_close(bp->dev);
16467                else
16468                        rc = bnxt_open_nic(bp, true, false);
16469        }
16470
16471        return rc;
16472}
16473
16474static int bnxt_init_mac_addr(struct bnxt *bp)
16475{
16476        int rc = 0;
16477
16478        if (BNXT_PF(bp)) {
16479                eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16480        } else {
16481#ifdef CONFIG_BNXT_SRIOV
16482                struct bnxt_vf_info *vf = &bp->vf;
16483                bool strict_approval = true;
16484
16485                if (is_valid_ether_addr(vf->mac_addr)) {
16486                        /* overwrite netdev dev_addr with admin VF MAC */
16487                        eth_hw_addr_set(bp->dev, vf->mac_addr);
16488                        /* Older PF driver or firmware may not approve this
16489                         * correctly.
16490                         */
16491                        strict_approval = false;
16492                } else {
16493                        eth_hw_addr_random(bp->dev);
16494                }
16495                rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16496#endif
16497        }
16498        return rc;
16499}
16500
16501static void bnxt_vpd_read_info(struct bnxt *bp)
16502{
16503        struct pci_dev *pdev = bp->pdev;
16504        unsigned int vpd_size, kw_len;
16505        int pos, size;
16506        u8 *vpd_data;
16507
16508        vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16509        if (IS_ERR(vpd_data)) {
16510                pci_warn(pdev, "Unable to read VPD\n");
16511                return;
16512        }
16513
16514        pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16515                                           PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16516        if (pos < 0)
16517                goto read_sn;
16518
16519        size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16520        memcpy(bp->board_partno, &vpd_data[pos], size);
16521
16522read_sn:
16523        pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16524                                           PCI_VPD_RO_KEYWORD_SERIALNO,
16525                                           &kw_len);
16526        if (pos < 0)
16527                goto exit;
16528
16529        size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16530        memcpy(bp->board_serialno, &vpd_data[pos], size);
16531exit:
16532        kfree(vpd_data);
16533}
16534
16535static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16536{
16537        struct pci_dev *pdev = bp->pdev;
16538        u64 qword;
16539
16540        qword = pci_get_dsn(pdev);
16541        if (!qword) {
16542                netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16543                return -EOPNOTSUPP;
16544        }
16545
16546        put_unaligned_le64(qword, dsn);
16547
16548        bp->flags |= BNXT_FLAG_DSN_VALID;
16549        return 0;
16550}
16551
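/* pci_get_dsn() returns the 64-bit Device Serial Number from the PCIe DSN
 * extended capability, or 0 when the capability is absent.
 * put_unaligned_le64() stores it LSB-first, which is the byte order later
 * exposed as the switchdev parent ID in bnxt_get_port_parent_id().
 */
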
16552static int bnxt_map_db_bar(struct bnxt *bp)
16553{
16554        if (!bp->db_size)
16555                return -ENODEV;
16556        bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16557        if (!bp->bar1)
16558                return -ENOMEM;
16559        return 0;
16560}
16561
16562void bnxt_print_device_info(struct bnxt *bp)
16563{
16564        netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16565                    board_info[bp->board_idx].name,
16566                    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16567
16568        pcie_print_link_status(bp->pdev);
16569}
16570
16571static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16572{
16573        struct bnxt_hw_resc *hw_resc;
16574        struct net_device *dev;
16575        struct bnxt *bp;
16576        int rc, max_irqs;
16577
16578        if (pci_is_bridge(pdev))
16579                return -ENODEV;
16580
16581        if (!pdev->msix_cap) {
16582                dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16583                return -ENODEV;
16584        }
16585
16586        /* Clear any pending DMA transactions left over from the crash
16587         * kernel while loading the driver in the capture kernel.
16588         */
16589        if (is_kdump_kernel()) {
16590                pci_clear_master(pdev);
16591                pcie_flr(pdev);
16592        }
16593
16594        max_irqs = bnxt_get_max_irq(pdev);
16595        dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16596                                 max_irqs);
16597        if (!dev)
16598                return -ENOMEM;
16599
16600        bp = netdev_priv(dev);
16601        bp->board_idx = ent->driver_data;
16602        bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16603        bnxt_set_max_func_irqs(bp, max_irqs);
16604
16605        if (bnxt_vf_pciid(bp->board_idx))
16606                bp->flags |= BNXT_FLAG_VF;
16607
16608        /* No devlink port registration in case of a VF */
16609        if (BNXT_PF(bp))
16610                SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16611
16612        rc = bnxt_init_board(pdev, dev);
16613        if (rc < 0)
16614                goto init_err_free;
16615
16616        dev->netdev_ops = &bnxt_netdev_ops;
16617        dev->stat_ops = &bnxt_stat_ops;
16618        dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16619        dev->ethtool_ops = &bnxt_ethtool_ops;
16620        pci_set_drvdata(pdev, dev);
16621
16622        rc = bnxt_alloc_hwrm_resources(bp);
16623        if (rc)
16624                goto init_err_pci_clean;
16625
16626        mutex_init(&bp->hwrm_cmd_lock);
16627        mutex_init(&bp->link_lock);
16628
16629        rc = bnxt_fw_init_one_p1(bp);
16630        if (rc)
16631                goto init_err_pci_clean;
16632
16633        if (BNXT_PF(bp))
16634                bnxt_vpd_read_info(bp);
16635
16636        if (BNXT_CHIP_P5_PLUS(bp)) {
16637                bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16638                if (BNXT_CHIP_P7(bp))
16639                        bp->flags |= BNXT_FLAG_CHIP_P7;
16640        }
16641
16642        rc = bnxt_alloc_rss_indir_tbl(bp);
16643        if (rc)
16644                goto init_err_pci_clean;
16645
16646        rc = bnxt_fw_init_one_p2(bp);
16647        if (rc)
16648                goto init_err_pci_clean;
16649
16650        rc = bnxt_map_db_bar(bp);
16651        if (rc) {
16652                dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16653                        rc);
16654                goto init_err_pci_clean;
16655        }
16656
16657        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16658                           NETIF_F_TSO | NETIF_F_TSO6 |
16659                           NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16660                           NETIF_F_GSO_IPXIP4 |
16661                           NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16662                           NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16663                           NETIF_F_RXCSUM | NETIF_F_GRO;
16664        if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16665                dev->hw_features |= NETIF_F_GSO_UDP_L4;
16666
16667        if (BNXT_SUPPORTS_TPA(bp))
16668                dev->hw_features |= NETIF_F_LRO;
16669
16670        dev->hw_enc_features =
16671                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16672                        NETIF_F_TSO | NETIF_F_TSO6 |
16673                        NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16674                        NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16675                        NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16676        if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16677                dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16678        if (bp->flags & BNXT_FLAG_CHIP_P7)
16679                dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16680        else
16681                dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16682
16683        dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16684                                    NETIF_F_GSO_GRE_CSUM;
16685        dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16686        if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16687                dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16688        if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16689                dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16690        if (BNXT_SUPPORTS_TPA(bp))
16691                dev->hw_features |= NETIF_F_GRO_HW;
16692        dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16693        if (dev->features & NETIF_F_GRO_HW)
16694                dev->features &= ~NETIF_F_LRO;
16695        dev->priv_flags |= IFF_UNICAST_FLT;
16696
16697        netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16698        if (bp->tso_max_segs)
16699                netif_set_tso_max_segs(dev, bp->tso_max_segs);
16700
16701        dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16702                            NETDEV_XDP_ACT_RX_SG;
16703
16704#ifdef CONFIG_BNXT_SRIOV
16705        init_waitqueue_head(&bp->sriov_cfg_wait);
16706#endif
16707        if (BNXT_SUPPORTS_TPA(bp)) {
16708                bp->gro_func = bnxt_gro_func_5730x;
16709                if (BNXT_CHIP_P4(bp))
16710                        bp->gro_func = bnxt_gro_func_5731x;
16711                else if (BNXT_CHIP_P5_PLUS(bp))
16712                        bp->gro_func = bnxt_gro_func_5750x;
16713        }
16714        if (!BNXT_CHIP_P4_PLUS(bp))
16715                bp->flags |= BNXT_FLAG_DOUBLE_DB;
16716
16717        rc = bnxt_init_mac_addr(bp);
16718        if (rc) {
16719                dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16720                rc = -EADDRNOTAVAIL;
16721                goto init_err_pci_clean;
16722        }
16723
16724        if (BNXT_PF(bp)) {
16725                /* Read the adapter's DSN to use as the eswitch switch_id */
16726                rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16727        }
16728
16729        /* MTU range: 60 - FW defined max */
16730        dev->min_mtu = ETH_ZLEN;
16731        dev->max_mtu = bp->max_mtu;
16732
16733        rc = bnxt_probe_phy(bp, true);
16734        if (rc)
16735                goto init_err_pci_clean;
16736
16737        hw_resc = &bp->hw_resc;
16738        bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16739                       BNXT_L2_FLTR_MAX_FLTR;
16740        /* Older firmware may not report these filter counts properly */
16741        if (bp->max_fltr < BNXT_MAX_FLTR)
16742                bp->max_fltr = BNXT_MAX_FLTR;
16743        bnxt_init_l2_fltr_tbl(bp);
16744        __bnxt_set_rx_skb_mode(bp, false);
16745        bnxt_set_tpa_flags(bp);
16746        bnxt_init_ring_params(bp);
16747        bnxt_set_ring_params(bp);
16748        bnxt_rdma_aux_device_init(bp);
16749        rc = bnxt_set_dflt_rings(bp, true);
16750        if (rc) {
16751                if (BNXT_VF(bp) && rc == -ENODEV) {
16752                        netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16753                } else {
16754                        netdev_err(bp->dev, "Not enough rings available.\n");
16755                        rc = -ENOMEM;
16756                }
16757                goto init_err_pci_clean;
16758        }
16759
16760        bnxt_fw_init_one_p3(bp);
16761
16762        bnxt_init_dflt_coal(bp);
16763
16764        if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16765                bp->flags |= BNXT_FLAG_STRIP_VLAN;
16766
16767        rc = bnxt_init_int_mode(bp);
16768        if (rc)
16769                goto init_err_pci_clean;
16770
16771        /* No TC has been set yet and rings may have been trimmed due to
16772         * limited MSIX, so we re-initialize the TX rings per TC.
16773         */
16774        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16775
16776        if (BNXT_PF(bp)) {
16777                if (!bnxt_pf_wq) {
16778                        bnxt_pf_wq =
16779                                create_singlethread_workqueue("bnxt_pf_wq");
16780                        if (!bnxt_pf_wq) {
16781                                dev_err(&pdev->dev, "Unable to create workqueue.\n");
16782                                rc = -ENOMEM;
16783                                goto init_err_pci_clean;
16784                        }
16785                }
16786                rc = bnxt_init_tc(bp);
16787                if (rc)
16788                        netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16789                                   rc);
16790        }
16791
16792        bnxt_inv_fw_health_reg(bp);
16793        rc = bnxt_dl_register(bp);
16794        if (rc)
16795                goto init_err_dl;
16796
16797        INIT_LIST_HEAD(&bp->usr_fltr_list);
16798
16799        if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16800                bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16801        if (BNXT_SUPPORTS_QUEUE_API(bp))
16802                dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16803        dev->request_ops_lock = true;
16804        dev->netmem_tx = true;
16805
16806        rc = register_netdev(dev);
16807        if (rc)
16808                goto init_err_cleanup;
16809
16810        bnxt_dl_fw_reporters_create(bp);
16811
16812        bnxt_rdma_aux_device_add(bp);
16813
16814        bnxt_print_device_info(bp);
16815
16816        pci_save_state(pdev);
16817
16818        return 0;
init_err_cleanup:
        bnxt_rdma_aux_device_uninit(bp);
        bnxt_dl_unregister(bp);
init_err_dl:
        bnxt_shutdown_tc(bp);
        bnxt_clear_int_mode(bp);

init_err_pci_clean:
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_free_hwrm_resources(bp);
        bnxt_hwmon_uninit(bp);
        bnxt_ethtool_free(bp);
        bnxt_ptp_clear(bp);
        kfree(bp->ptp_cfg);
        bp->ptp_cfg = NULL;
        kfree(bp->fw_health);
        bp->fw_health = NULL;
        bnxt_cleanup_pci(bp);
        bnxt_free_ctx_mem(bp, true);
        bnxt_free_crash_dump_mem(bp);
        kfree(bp->rss_indir_tbl);
        bp->rss_indir_tbl = NULL;

init_err_free:
        free_netdev(dev);
        return rc;
}

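/* PCI .shutdown hook: close the netdev if it is running, quiesce the
 * device, and on power-off arm wake-on-LAN and enter D3hot.
 */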
static void bnxt_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp;

        if (!dev)
                return;

        rtnl_lock();
        netdev_lock(dev);
        bp = netdev_priv(dev);
        if (!bp)
                goto shutdown_exit;

        if (netif_running(dev))
                netif_close(dev);

        bnxt_ptp_clear(bp);
        bnxt_clear_int_mode(bp);
        pci_disable_device(pdev);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, bp->wol);
                pci_set_power_state(pdev, PCI_D3hot);
        }

shutdown_exit:
        netdev_unlock(dev);
        rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
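/* System suspend: stop ULP (e.g. RDMA) activity first, close the netdev,
 * unregister the driver from firmware and release context memory.
 */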
static int bnxt_suspend(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct bnxt *bp = netdev_priv(dev);
        int rc = 0;

        bnxt_ulp_stop(bp);

        netdev_lock(dev);
        if (netif_running(dev)) {
                netif_device_detach(dev);
                rc = bnxt_close(dev);
        }
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_ptp_clear(bp);
        pci_disable_device(bp->pdev);
        bnxt_free_ctx_mem(bp, false);
        netdev_unlock(dev);
        return rc;
}

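/* System resume: firmware state may have been lost across suspend, so
 * re-establish communication (version query, function reset, capability
 * query, driver registration) before reopening the interface.
 */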
static int bnxt_resume(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct bnxt *bp = netdev_priv(dev);
        int rc = 0;

        netdev_lock(dev);
        rc = pci_enable_device(bp->pdev);
        if (rc) {
                netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
                           rc);
                goto resume_exit;
        }
        pci_set_master(bp->pdev);
        if (bnxt_hwrm_ver_get(bp)) {
                rc = -ENODEV;
                goto resume_exit;
        }
        rc = bnxt_hwrm_func_reset(bp);
        if (rc) {
                rc = -EBUSY;
                goto resume_exit;
        }

        rc = bnxt_hwrm_func_qcaps(bp);
        if (rc)
                goto resume_exit;

        bnxt_clear_reservations(bp, true);

        if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
                rc = -ENODEV;
                goto resume_exit;
        }
        if (bp->fw_crash_mem)
                bnxt_hwrm_crash_dump_mem_cfg(bp);

        if (bnxt_ptp_init(bp)) {
                kfree(bp->ptp_cfg);
                bp->ptp_cfg = NULL;
        }
        bnxt_get_wol_settings(bp);
        if (netif_running(dev)) {
                rc = bnxt_open(dev);
                if (!rc)
                        netif_device_attach(dev);
        }

resume_exit:
        netdev_unlock(bp->dev);
        bnxt_ulp_start(bp, rc);
        if (!rc)
                bnxt_reenable_sriov(bp);
        return rc;
}

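/* Suspend/resume are wired into the PM core only when CONFIG_PM_SLEEP
 * is enabled; otherwise the driver registers no PM ops.
 */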
static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

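/* PCIe AER recovery flow, driven by the PCI core:
 *
 *   bnxt_io_error_detected() - quiesce the device, request a slot reset
 *   bnxt_io_slot_reset()     - re-enable the device, recover firmware
 *   bnxt_io_resume()         - reopen rings and restart traffic
 */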
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
        bool abort = false;

        netdev_info(netdev, "PCI I/O error detected\n");

        bnxt_ulp_stop(bp);

        netdev_lock(netdev);
        netif_device_detach(netdev);

        if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_err(bp->dev, "Firmware reset already in progress\n");
                abort = true;
        }

        if (abort || state == pci_channel_io_perm_failure) {
                netdev_unlock(netdev);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        /* The link is no longer reliable if the state is
         * pci_channel_io_frozen, so disable bus mastering to prevent any
         * potential bad DMAs before freeing kernel memory.
         */
        if (state == pci_channel_io_frozen) {
                set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
                bnxt_fw_fatal_close(bp);
        }

        if (netif_running(netdev))
                __bnxt_close_nic(bp, true, true);

        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        bnxt_free_ctx_mem(bp, false);
        netdev_unlock(netdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
        pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
        int retry = 0;
        int err = 0;
        int off;

        netdev_info(bp->dev, "PCI Slot Reset\n");

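        /* After a fatal error on older (pre-P5_PLUS) chips, give the
         * device time to complete its reset before re-enabling it.
         */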
        if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
            test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
                msleep(900);

        netdev_lock(netdev);

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
        } else {
                pci_set_master(pdev);
                /* Upon a fatal error, the device's internal logic that
                 * latches the BAR values is reset and is restored only
                 * when the BARs are rewritten.
                 *
                 * Since pci_restore_state() does not rewrite a BAR when
                 * its current value already matches the saved value, the
                 * driver writes zeroes to the BARs first to force a full
                 * restore after a fatal error.
                 */
                if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
                                       &bp->state)) {
                        for (off = PCI_BASE_ADDRESS_0;
                             off <= PCI_BASE_ADDRESS_5; off += 4)
                                pci_write_config_dword(bp->pdev, off, 0);
                }
                pci_restore_state(pdev);
                pci_save_state(pdev);

                bnxt_inv_fw_health_reg(bp);
                bnxt_try_map_fw_health_reg(bp);

                /* In some PCIe AER scenarios, firmware may take up to
                 * 10 seconds to become ready.
                 */
                do {
                        err = bnxt_try_recover_fw(bp);
                        if (!err)
                                break;
                        retry++;
                } while (retry < BNXT_FW_SLOT_RESET_RETRY);

                if (err) {
                        dev_err(&pdev->dev, "Firmware not ready\n");
                        goto reset_exit;
                }

                err = bnxt_hwrm_func_reset(bp);
                if (!err)
                        result = PCI_ERS_RESULT_RECOVERED;

                /* IRQs will be initialized later in bnxt_io_resume */
                bnxt_ulp_irq_stop(bp);
                bnxt_clear_int_mode(bp);
        }

reset_exit:
        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
        bnxt_clear_reservations(bp, true);
        netdev_unlock(netdev);

        return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
        int err;

        netdev_info(bp->dev, "PCI Slot Resume\n");
        netdev_lock(netdev);

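        /* Re-query capabilities after the reset.  If the interface was
         * down, still reserve rings and re-init interrupts so that a
         * later open can succeed.
         */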
        err = bnxt_hwrm_func_qcaps(bp);
        if (!err) {
                if (netif_running(netdev)) {
                        err = bnxt_open(netdev);
                } else {
                        err = bnxt_reserve_rings(bp, true);
                        if (!err)
                                err = bnxt_init_int_mode(bp);
                }
        }

        if (!err)
                netif_device_attach(netdev);

        netdev_unlock(netdev);
        bnxt_ulp_start(bp, err);
        if (!err)
                bnxt_reenable_sriov(bp);
}

static const struct pci_error_handlers bnxt_err_handler = {
        .error_detected = bnxt_io_error_detected,
        .slot_reset     = bnxt_io_slot_reset,
        .resume         = bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnxt_pci_tbl,
        .probe          = bnxt_init_one,
        .remove         = bnxt_remove_one,
        .shutdown       = bnxt_shutdown,
        .driver.pm      = BNXT_PM_OPS,
        .err_handler    = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
        .sriov_configure = bnxt_sriov_configure,
#endif
};

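/* Module init/exit: debugfs is set up before the PCI driver registers,
 * and the shared PF workqueue created at probe time is destroyed only
 * at module unload.
 */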
static int __init bnxt_init(void)
{
        int err;

        bnxt_debug_init();
        err = pci_register_driver(&bnxt_pci_driver);
        if (err) {
                bnxt_debug_exit();
                return err;
        }

        return 0;
}

static void __exit bnxt_exit(void)
{
        pci_unregister_driver(&bnxt_pci_driver);
        if (bnxt_pf_wq)
                destroy_workqueue(bnxt_pf_wq);
        bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);
