linux/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
   1/*
   2 * aQuantia Corporation Network Driver
   3 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 */
   9
  10/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
  11
  12#include "../aq_hw.h"
  13#include "../aq_hw_utils.h"
  14#include "../aq_ring.h"
  15#include "../aq_nic.h"
  16#include "hw_atl_b0.h"
  17#include "hw_atl_utils.h"
  18#include "hw_atl_llh.h"
  19#include "hw_atl_b0_internal.h"
  20#include "hw_atl_llh_internal.h"
  21
/* Capabilities common to every Atlantic B0 board variant.  The per-board
 * capability structs below add only media type and the supported
 * link-speed mask on top of this initializer.
 */
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
        .msix_irqs = 8U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL_B0_RSS_MAX,        \
        .tcs = HW_ATL_B0_TC_MAX,          \
        .rxd_alignment = 1U,              \
        .rxd_size = HW_ATL_B0_RXD_SIZE,   \
        .rxds_max = HW_ATL_B0_MAX_RXD,    \
        .rxds_min = HW_ATL_B0_MIN_RXD,    \
        .txd_alignment = 1U,              \
        .txd_size = HW_ATL_B0_TXD_SIZE,   \
        .txds_max = HW_ATL_B0_MAX_TXD,    \
        .txds_min = HW_ATL_B0_MIN_TXD,    \
        .txhwb_alignment = 4096U,         \
        .tx_rings = HW_ATL_B0_TX_RINGS,   \
        .rx_rings = HW_ATL_B0_RX_RINGS,   \
        .hw_features = NETIF_F_HW_CSUM |  \
                        NETIF_F_RXCSUM |  \
                        NETIF_F_RXHASH |  \
                        NETIF_F_SG |      \
                        NETIF_F_TSO |     \
                        NETIF_F_LRO |     \
                        NETIF_F_NTUPLE |  \
                        NETIF_F_HW_VLAN_CTAG_FILTER, \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true,             \
        .mtu = HW_ATL_B0_MTU_JUMBO,       \
        .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U
  52
/* AQC100: fibre media, link rates 100M up to 10G */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};
  62
/* AQC107: twisted-pair media, link rates 100M up to 10G */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};
  72
/* AQC108: twisted-pair media, link rates 100M up to 5G */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};
  81
/* AQC109: twisted-pair media, link rates 100M up to 2.5G */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};
  89
  90static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
  91{
  92        int err = 0;
  93
  94        err = hw_atl_utils_soft_reset(self);
  95        if (err)
  96                return err;
  97
  98        self->aq_fw_ops->set_state(self, MPI_RESET);
  99
 100        err = aq_hw_err_from_flags(self);
 101
 102        return err;
 103}
 104
/* Enable/disable XOFF (RX pause) generation for traffic class @tc.
 * Only the AQ_NIC_FC_RX bit of @fc is consumed here.
 */
static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
        return 0;
}
 110
 111static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 112{
 113        u32 tc = 0U;
 114        u32 buff_size = 0U;
 115        unsigned int i_priority = 0U;
 116
 117        /* TPS Descriptor rate init */
 118        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
 119        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
 120
 121        /* TPS VM init */
 122        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
 123
 124        /* TPS TC credits init */
 125        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
 126        hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
 127
 128        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
 129        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
 130        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
 131        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
 132
 133        /* Tx buf size */
 134        buff_size = HW_ATL_B0_TXBUF_MAX;
 135
 136        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
 137        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
 138                                                   (buff_size *
 139                                                   (1024 / 32U) * 66U) /
 140                                                   100U, tc);
 141        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
 142                                                   (buff_size *
 143                                                   (1024 / 32U) * 50U) /
 144                                                   100U, tc);
 145
 146        /* QoS Rx buf size per TC */
 147        tc = 0;
 148        buff_size = HW_ATL_B0_RXBUF_MAX;
 149
 150        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
 151        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
 152                                                   (buff_size *
 153                                                   (1024U / 32U) * 66U) /
 154                                                   100U, tc);
 155        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
 156                                                   (buff_size *
 157                                                   (1024U / 32U) * 50U) /
 158                                                   100U, tc);
 159
 160        hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
 161
 162        /* QoS 802.1p priority -> TC mapping */
 163        for (i_priority = 8U; i_priority--;)
 164                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
 165
 166        return aq_hw_err_from_flags(self);
 167}
 168
/* Write the RSS hash secret key into hardware.
 *
 * Ten 32-bit key words are written in reverse order (i = 9..0) to
 * ascending key addresses, each byte-swapped with __swab32().  When RSS
 * is disabled in the nic config, zeros are written instead.  Each write
 * is latched by setting the wr_en bit and polling until hardware clears
 * it (up to 10 ms, 1 ms steps).
 *
 * Return: 0, a poll timeout error, or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
                                     struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int err = 0;
        unsigned int i = 0U;
        unsigned int addr = 0U;
        u32 val;

        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                /* wait for hardware to consume the word (wr_en self-clears) */
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
 196
/* Program the RSS redirection (indirection) table.
 *
 * Each of the HW_ATL_B0_RSS_REDIRECTION_MAX entries is a 3-bit queue
 * index (indirection_table[i] reduced modulo the active RSS queue
 * count).  Entries are bit-packed into the u16 bitary[] staging buffer
 * and then written to hardware one 16-bit word at a time, polling the
 * wr_en bit after each write until hardware clears it.
 *
 * NOTE(review): the packing writes through a u32 cast into the u16
 * array; the extra leading element in bitary[] keeps the final u32
 * store in bounds — confirm alignment/aliasing assumptions hold on all
 * supported targets.
 *
 * Return: 0, a poll timeout error, or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u8 *indirection_table = rss_params->indirection_table;
        u32 i = 0U;
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        int err = 0;
        u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
                   HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
        u32 val;

        memset(bitary, 0, sizeof(bitary));

        /* pack 3-bit queue indices into consecutive bit positions */
        for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                        ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                /* wait for hardware to consume the word (wr_en self-clears) */
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
 232
/* Program checksum, LSO and LRO offload engines from @aq_nic_cfg.
 *
 * TX checksums and LSO are enabled unconditionally; RX checksum follows
 * NETIF_F_RXCSUM; LRO/RSC follow aq_nic_cfg->is_lro.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
                                    struct aq_nic_cfg_s *aq_nic_cfg)
{
        unsigned int i;

        /* TX checksums offloads*/
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* RX checksums offloads*/
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                                 NETIF_F_RXCSUM));
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
                                              NETIF_F_RXCSUM));

        /* LSO offloads: enable on all rings (one bit per ring) */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

/* LRO offloads */
        {
                /* Encoded max descriptors per LRO session, capped by
                 * HW_ATL_B0_LRO_RXD_MAX: 0x3 -> 8, 0x2 -> 4, 0x1 -> 2,
                 * 0x0 -> 1 (presumed encoding — matches threshold chain).
                 */
                unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
                        ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
                        ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

                for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
                        hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

                hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
                hw_atl_rpo_lro_inactive_interval_set(self, 0);
                /* the LRO timebase divider is 5 uS (0x61a),
                 * which is multiplied by 50(0x32)
                 * to get a maximum coalescing interval of 250 uS,
                 * which is the default value
                 */
                hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);

                hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

                hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

                hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);

                hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

                hw_atl_rpo_lro_pkt_lim_set(self, 1U);

                /* LRO and interrupt-side RSC are toggled together */
                hw_atl_rpo_lro_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
                hw_atl_itr_rsc_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

                hw_atl_itr_rsc_delay_set(self, 1U);
        }
        return aq_hw_err_from_flags(self);
}
 288
/* One-time TX datapath init: TC mode, LSO TCP flag masks, write-back
 * interrupts, the TPO2 chip-feature tweak register, DCA off and source
 * checksum insertion on.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
        /* Tx TC/Queue number config */
        hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);

        /* TCP flag masks applied by LSO to first/middle/last segments */
        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc: undocumented tweak register, set only when the chip
         * reports the TPO2 feature
         */
        aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
                        0x00010000U : 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}
 311
/* One-time RX datapath init: TC mode, flow control, RSS ring selection,
 * L2 unicast/multicast/broadcast filters, VLAN ethertypes and untagged
 * policy, write-back interrupts and DCA off.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
                                        0xB3333333U : 0x00000000U);

        /* Multicast filters: only unicast filter 0 enabled by default,
         * all filters set to "host" action
         */
        for (i = HW_ATL_B0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* Vlan filters: 802.1ad outer / 802.1Q inner ethertypes */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

        hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

        // Always accept untagged packets
        hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
        hw_atl_rpf_vlan_untagged_act_set(self, 1U);

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc: undocumented tweak register gated on the RPF2 feature */
        aq_hw_write_reg(self, 0x00005040U,
                        IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}
 361
 362static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
 363{
 364        int err = 0;
 365        unsigned int h = 0U;
 366        unsigned int l = 0U;
 367
 368        if (!mac_addr) {
 369                err = -EINVAL;
 370                goto err_exit;
 371        }
 372        h = (mac_addr[0] << 8) | (mac_addr[1]);
 373        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 374                (mac_addr[4] << 8) | mac_addr[5];
 375
 376        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
 377        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
 378        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
 379        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
 380
 381        err = aq_hw_err_from_flags(self);
 382
 383err_exit:
 384        return err;
 385}
 386
/* Full hardware bring-up: TX/RX paths, MAC address, firmware link state,
 * QoS, RSS, PCI request-size limits, statistics baseline and interrupt
 * routing, then the offload configuration.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
        /* Global interrupt control values indexed by [irq_type][multi-vector];
         * exact register encoding is hardware-defined.
         */
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
                [AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
                [AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
                [AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
        };

        int err = 0;
        u32 val;

        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

        hw_atl_b0_hw_init_tx_path(self);
        hw_atl_b0_hw_init_rx_path(self);

        hw_atl_b0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_b0_hw_qos_set(self);
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Force limit MRRS on RDM/TDM to 2K */
        val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
        aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
                        (val & ~0x707) | 0x404);

        /* TX DMA total request limit. B0 hardware is not capable to
         * handle more than (8K-MRRS) incoming DMA data.
         * Value 24 in 256byte units
         */
        aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts: pick global control value by irq type and whether
         * more than one vector is in use
         */
        hw_atl_reg_irq_glb_ctl_set(self,
                                   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                                 [(aq_nic_cfg->vecs > 1U) ?
                                                 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Interrupts: map the fatal-error interrupt (with enable bits) */
        hw_atl_reg_gen_irq_map_set(self,
                                   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
                            ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);

        /* Enable link interrupt */
        if (aq_nic_cfg->link_irq_vec)
                hw_atl_reg_gen_irq_map_set(self, BIT(7) |
                                           aq_nic_cfg->link_irq_vec, 3U);

        hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}
 455
/* Enable TX descriptor processing for @ring. */
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}
 462
/* Enable RX descriptor processing for @ring. */
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}
 469
/* Enable the TX and RX packet buffers, starting the datapath. */
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);
        return aq_hw_err_from_flags(self);
}
 476
/* Publish the software tail pointer to hardware, kicking TX DMA. */
static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
        return 0;
}
 483
/* Build @frags TX descriptors from ring->buff_ring starting at sw_tail,
 * then publish the new tail to hardware.
 *
 * Buffers flagged is_txc become LSO context descriptors; all others
 * become data descriptors.  A context descriptor arms is_gso so the
 * following data descriptors carry the LSO command until EOP.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int pkt_len = 0U;
        unsigned int frag_count = 0U;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        /* single-fragment packet carries its length in len, otherwise
         * the total packet length lives in len_pkt of the first buffer
         */
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                HW_ATL_B0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_txc) {
                        /* LSO context descriptor: header lengths and MSS.
                         * NOTE(review): len_l3 << 31 keeps only bit 0 of
                         * len_l3 in ctl; the rest goes into ctl2 via
                         * len_l3 >> 1 — matches the split-field layout,
                         * confirm against the descriptor spec.
                         */
                        txd->ctl |= (buff->len_l3 << 31) |
                                (buff->len_l2 << 24) |
                                HW_ATL_B0_TXD_CTL_CMD_TCP |
                                HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl2 |= (buff->mss << 16) |
                                (buff->len_l4 << 8) |
                                (buff->len_l3 >> 1);

                        /* payload length excludes L2/L3/L4 headers */
                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;

                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
                                                ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
                                txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
                        }

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

                        if (unlikely(buff->is_eop)) {
                                /* last fragment: end-of-packet and
                                 * write-back request; LSO run ends here
                                 */
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
                                is_gso = false;
                        }
                }

                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_b0_hw_tx_ring_tail_update(self, ring);
        return aq_hw_err_from_flags(self);
}
 558
/* Program one RX ring: DMA base address, length, buffer sizes, VLAN
 * stripping off, interrupt vector mapping and DCA off.  The ring is
 * left disabled; hw_atl_b0_hw_ring_rx_start() enables it.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                                  aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self,
                                                  dma_desc_addr_msw, aq_ring->idx);

        /* ring length register is in units of 8 descriptors */
        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        /* data buffer size register is in 1 KB units */
        hw_atl_rdm_rx_desc_data_buff_size_set(self,
                                              AQ_CFG_RX_FRAME_MAX / 1024U,
                                       aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}
 599
/* Program one TX ring: DMA base address, length, tail pointer,
 * write-back threshold, interrupt vector mapping and DCA off.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                                  aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                                  aq_ring->idx);

        /* ring length register is in units of 8 descriptors */
        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}
 629
 630static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
 631                                     struct aq_ring_s *ring,
 632                                     unsigned int sw_tail_old)
 633{
 634        for (; sw_tail_old != ring->sw_tail;
 635                sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
 636                struct hw_atl_rxd_s *rxd =
 637                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
 638                                                        HW_ATL_B0_RXD_SIZE];
 639
 640                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
 641
 642                rxd->buf_addr = buff->pa;
 643                rxd->hdr_addr = 0U;
 644        }
 645
 646        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
 647
 648        return aq_hw_err_from_flags(self);
 649}
 650
/* Read the hardware TX head pointer into ring->hw_head.
 *
 * Return: 0, -ENXIO if the device has been flagged as unplugged, or the
 * accumulated hw error flags.
 */
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        int err = 0;
        unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

        /* a surprise-removed device returns garbage from register reads;
         * bail before trusting hw_head_
         */
        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
 667
/* Walk completed RX write-back descriptors from hw_head towards sw_tail
 * and translate them into aq_ring_buff_s state: checksum results, error
 * flags, RSS hash, length and multi-descriptor (LRO/jumbo) chaining.
 * Stops at the first descriptor hardware has not completed.
 *
 * Return: 0 or the accumulated hw error flags.
 */
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                                        struct aq_ring_s *ring)
{
        for (; ring->hw_head != ring->sw_tail;
                ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;
                u8 rx_stat = 0U;

                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
                }

                buff = &ring->buff_ring[ring->hw_head];

                /* status bits 2..5: MAC error / checksum result flags */
                rx_stat = (0x0000003CU & rxd_wb->status) >> 2;

                /* bit 0: IP csum checked, bit 1: TCP/UDP csum checked */
                is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;

                pkt_type = 0xFFU & (rxd_wb->type >> 4);

                /* IPv4 header checksum result (pkt_type low bits == 0) */
                if (is_rx_check_sum_enabled & BIT(0) &&
                    (0x0U == (pkt_type & 0x3U)))
                        buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;

                /* L4 checksum result, keyed on UDP vs TCP packet type */
                if (is_rx_check_sum_enabled & BIT(1)) {
                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                }
                buff->is_cso_err = !!(rx_stat & 0x6);
                /* Checksum offload workaround for small packets */
                if (unlikely(rxd_wb->pkt_len <= 60)) {
                        buff->is_ip_cso = 0U;
                        buff->is_cso_err = 0U;
                }

                if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
                        /* MAC error or DMA error */
                        buff->is_error = 1U;
                }
                if (self->aq_nic_cfg->is_rss) {
                        /* last 4 byte */
                        u16 rss_type = rxd_wb->type & 0xFU;

                        /* types 4/5 are L4 hashes, 1..3 are L3 hashes */
                        if (rss_type && rss_type < 0x8U) {
                                buff->is_hash_l4 = (rss_type == 0x4 ||
                                rss_type == 0x5);
                                buff->rss_hash = rxd_wb->rss_hash;
                        }
                }

                if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                        /* final fragment: pkt_len modulo frame size gives
                         * the bytes in this buffer; 0 means a full buffer
                         */
                        buff->len = rxd_wb->pkt_len %
                                AQ_CFG_RX_FRAME_MAX;
                        buff->len = buff->len ?
                                buff->len : AQ_CFG_RX_FRAME_MAX;
                        buff->next = 0U;
                        buff->is_eop = 1U;
                } else {
                        buff->len =
                                rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
                                AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;

                        if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
                                rxd_wb->status) {
                                /* LRO */
                                buff->next = rxd_wb->next_desc_ptr;
                                ++ring->stats.rx.lro_packets;
                        } else {
                                /* jumbo */
                                buff->next =
                                        aq_ring_next_dx(ring,
                                                        ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}
 756
/* Unmask the interrupts selected by the low 32 bits of @mask. */
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
        return aq_hw_err_from_flags(self);
}
 762
/* Mask and clear the interrupts selected by the low 32 bits of @mask,
 * and bump the deferred-processing counter.
 */
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        atomic_inc(&self->dpc);
        return aq_hw_err_from_flags(self);
}
 771
 772static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 773{
 774        *mask = hw_atl_itr_irq_statuslsw_get(self);
 775        return aq_hw_err_from_flags(self);
 776}
 777
 778#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
 779
 780static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 781                                          unsigned int packet_filter)
 782{
 783        unsigned int i = 0U;
 784        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
 785
 786        hw_atl_rpfl2promiscuous_mode_en_set(self,
 787                                            IS_FILTER_ENABLED(IFF_PROMISC));
 788
 789        hw_atl_rpf_vlan_prom_mode_en_set(self,
 790                                     IS_FILTER_ENABLED(IFF_PROMISC) ||
 791                                     cfg->is_vlan_force_promisc);
 792
 793        hw_atl_rpfl2multicast_flr_en_set(self,
 794                                         IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
 795
 796        hw_atl_rpfl2_accept_all_mc_packets_set(self,
 797                                               IS_FILTER_ENABLED(IFF_ALLMULTI));
 798
 799        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 800
 801        cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
 802
 803        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
 804                hw_atl_rpfl2_uc_flr_en_set(self,
 805                                           (cfg->is_mc_list_enabled &&
 806                                            (i <= cfg->mc_list_count)) ?
 807                                           1U : 0U, i);
 808
 809        return aq_hw_err_from_flags(self);
 810}
 811
 812#undef IS_FILTER_ENABLED
 813
 814static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
 815                                           u8 ar_mac
 816                                           [AQ_HW_MULTICAST_ADDRESS_MAX]
 817                                           [ETH_ALEN],
 818                                           u32 count)
 819{
 820        int err = 0;
 821
 822        if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
 823                err = -EBADRQC;
 824                goto err_exit;
 825        }
 826        for (self->aq_nic_cfg->mc_list_count = 0U;
 827                        self->aq_nic_cfg->mc_list_count < count;
 828                        ++self->aq_nic_cfg->mc_list_count) {
 829                u32 i = self->aq_nic_cfg->mc_list_count;
 830                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
 831                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
 832                                        (ar_mac[i][4] << 8) | ar_mac[i][5];
 833
 834                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
 835
 836                hw_atl_rpfl2unicast_dest_addresslsw_set(self,
 837                                                        l, HW_ATL_B0_MAC_MIN + i);
 838
 839                hw_atl_rpfl2unicast_dest_addressmsw_set(self,
 840                                                        h, HW_ATL_B0_MAC_MIN + i);
 841
 842                hw_atl_rpfl2_uc_flr_en_set(self,
 843                                           (self->aq_nic_cfg->is_mc_list_enabled),
 844                                           HW_ATL_B0_MAC_MIN + i);
 845        }
 846
 847        err = aq_hw_err_from_flags(self);
 848
 849err_exit:
 850        return err;
 851}
 852
 853static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 854{
 855        unsigned int i = 0U;
 856        u32 itr_tx = 2U;
 857        u32 itr_rx = 2U;
 858
 859        switch (self->aq_nic_cfg->itr) {
 860        case  AQ_CFG_INTERRUPT_MODERATION_ON:
 861        case  AQ_CFG_INTERRUPT_MODERATION_AUTO:
 862                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
 863                hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
 864                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
 865                hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
 866
 867                if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
 868                        /* HW timers are in 2us units */
 869                        int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
 870                        int tx_min_timer = tx_max_timer / 2;
 871
 872                        int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
 873                        int rx_min_timer = rx_max_timer / 2;
 874
 875                        tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
 876                        tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
 877                        rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
 878                        rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
 879
 880                        itr_tx |= tx_min_timer << 0x8U;
 881                        itr_tx |= tx_max_timer << 0x10U;
 882                        itr_rx |= rx_min_timer << 0x8U;
 883                        itr_rx |= rx_max_timer << 0x10U;
 884                } else {
 885                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
 886                                {0xfU, 0xffU}, /* 10Gbit */
 887                                {0xfU, 0x1ffU}, /* 5Gbit */
 888                                {0xfU, 0x1ffU}, /* 5Gbit 5GS */
 889                                {0xfU, 0x1ffU}, /* 2.5Gbit */
 890                                {0xfU, 0x1ffU}, /* 1Gbit */
 891                                {0xfU, 0x1ffU}, /* 100Mbit */
 892                        };
 893
 894                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
 895                                {0x6U, 0x38U},/* 10Gbit */
 896                                {0xCU, 0x70U},/* 5Gbit */
 897                                {0xCU, 0x70U},/* 5Gbit 5GS */
 898                                {0x18U, 0xE0U},/* 2.5Gbit */
 899                                {0x30U, 0x80U},/* 1Gbit */
 900                                {0x4U, 0x50U},/* 100Mbit */
 901                        };
 902
 903                        unsigned int speed_index =
 904                                        hw_atl_utils_mbps_2_speed_index(
 905                                                self->aq_link_status.mbps);
 906
 907                        /* Update user visible ITR settings */
 908                        self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
 909                                                        [speed_index][1] * 2;
 910                        self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
 911                                                        [speed_index][1] * 2;
 912
 913                        itr_tx |= hw_atl_b0_timers_table_tx_
 914                                                [speed_index][0] << 0x8U;
 915                        itr_tx |= hw_atl_b0_timers_table_tx_
 916                                                [speed_index][1] << 0x10U;
 917
 918                        itr_rx |= hw_atl_b0_timers_table_rx_
 919                                                [speed_index][0] << 0x8U;
 920                        itr_rx |= hw_atl_b0_timers_table_rx_
 921                                                [speed_index][1] << 0x10U;
 922                }
 923                break;
 924        case AQ_CFG_INTERRUPT_MODERATION_OFF:
 925                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
 926                hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
 927                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
 928                hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
 929                itr_tx = 0U;
 930                itr_rx = 0U;
 931                break;
 932        }
 933
 934        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
 935                hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
 936                hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
 937        }
 938
 939        return aq_hw_err_from_flags(self);
 940}
 941
 942static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 943{
 944        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
 945
 946        /* Invalidate Descriptor Cache to prevent writing to the cached
 947         * descriptors and to the data pointer of those descriptors
 948         */
 949        hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
 950
 951        return aq_hw_err_from_flags(self);
 952}
 953
 954static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
 955                                     struct aq_ring_s *ring)
 956{
 957        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
 958        return aq_hw_err_from_flags(self);
 959}
 960
 961static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
 962                                     struct aq_ring_s *ring)
 963{
 964        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
 965        return aq_hw_err_from_flags(self);
 966}
 967
 968static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
 969                                    struct aq_rx_filter_l3l4 *data)
 970{
 971        u8 location = data->location;
 972
 973        if (!data->is_ipv6) {
 974                hw_atl_rpfl3l4_cmd_clear(self, location);
 975                hw_atl_rpf_l4_spd_set(self, 0U, location);
 976                hw_atl_rpf_l4_dpd_set(self, 0U, location);
 977                hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
 978                hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
 979        } else {
 980                int i;
 981
 982                for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
 983                        hw_atl_rpfl3l4_cmd_clear(self, location + i);
 984                        hw_atl_rpf_l4_spd_set(self, 0U, location + i);
 985                        hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
 986                }
 987                hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
 988                hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
 989        }
 990
 991        return aq_hw_err_from_flags(self);
 992}
 993
 994static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
 995                                  struct aq_rx_filter_l3l4 *data)
 996{
 997        u8 location = data->location;
 998
 999        hw_atl_b0_hw_fl3l4_clear(self, data);
1000
1001        if (data->cmd) {
1002                if (!data->is_ipv6) {
1003                        hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
1004                                                          location,
1005                                                          data->ip_dst[0]);
1006                        hw_atl_rpfl3l4_ipv4_src_addr_set(self,
1007                                                         location,
1008                                                         data->ip_src[0]);
1009                } else {
1010                        hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
1011                                                          location,
1012                                                          data->ip_dst);
1013                        hw_atl_rpfl3l4_ipv6_src_addr_set(self,
1014                                                         location,
1015                                                         data->ip_src);
1016                }
1017        }
1018        hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
1019        hw_atl_rpf_l4_spd_set(self, data->p_src, location);
1020        hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
1021
1022        return aq_hw_err_from_flags(self);
1023}
1024
1025static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
1026                                struct aq_rx_filter_l2 *data)
1027{
1028        hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
1029        hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
1030        hw_atl_rpf_etht_user_priority_en_set(self,
1031                                             !!data->user_priority_en,
1032                                             data->location);
1033        if (data->user_priority_en)
1034                hw_atl_rpf_etht_user_priority_set(self,
1035                                                  data->user_priority,
1036                                                  data->location);
1037
1038        if (data->queue < 0) {
1039                hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
1040                hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
1041        } else {
1042                hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
1043                hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
1044                hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
1045        }
1046
1047        return aq_hw_err_from_flags(self);
1048}
1049
1050static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
1051                                  struct aq_rx_filter_l2 *data)
1052{
1053        hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
1054        hw_atl_rpf_etht_flr_set(self, 0U, data->location);
1055        hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
1056
1057        return aq_hw_err_from_flags(self);
1058}
1059
1060/**
1061 * @brief Set VLAN filter table
1062 * @details Configure VLAN filter table to accept (and assign the queue) traffic
1063 *  for the particular vlan ids.
1064 * Note: use this function under vlan promisc mode not to lost the traffic
1065 *
1066 * @param aq_hw_s
1067 * @param aq_rx_filter_vlan VLAN filter configuration
1068 * @return 0 - OK, <0 - error
1069 */
1070static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
1071                                 struct aq_rx_filter_vlan *aq_vlans)
1072{
1073        int i;
1074
1075        for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
1076                hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
1077                hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
1078                if (aq_vlans[i].enable) {
1079                        hw_atl_rpf_vlan_id_flr_set(self,
1080                                                   aq_vlans[i].vlan_id,
1081                                                   i);
1082                        hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
1083                        hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
1084                        if (aq_vlans[i].queue != 0xFF) {
1085                                hw_atl_rpf_vlan_rxq_flr_set(self,
1086                                                            aq_vlans[i].queue,
1087                                                            i);
1088                                hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
1089                        }
1090                }
1091        }
1092
1093        return aq_hw_err_from_flags(self);
1094}
1095
1096static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
1097{
1098        /* set promisc in case of disabing the vland filter */
1099        hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
1100
1101        return aq_hw_err_from_flags(self);
1102}
1103
/* Hardware-ops table for Atlantic B0 silicon, consumed by the generic
 * aq_nic / aq_ring layers.
 */
const struct aq_hw_ops hw_atl_ops_b0 = {
	/* Bring-up / teardown */
	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
	.hw_init              = hw_atl_b0_hw_init,
	.hw_reset             = hw_atl_b0_hw_reset,
	.hw_start             = hw_atl_b0_hw_start,
	.hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop              = hw_atl_b0_hw_stop,

	/* Datapath: transmit */
	.hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

	/* Datapath: receive */
	.hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

	/* Interrupt control */
	.hw_irq_enable           = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_b0_hw_irq_disable,
	.hw_irq_read             = hw_atl_b0_hw_irq_read,

	/* Configuration: rings, filters, RSS, offloads, stats */
	.hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
	.hw_filter_l2_set            = hw_atl_b0_hw_fl2_set,
	.hw_filter_l2_clear          = hw_atl_b0_hw_fl2_clear,
	.hw_filter_l3l4_set          = hw_atl_b0_hw_fl3l4_set,
	.hw_filter_vlan_set          = hw_atl_b0_hw_vlan_set,
	.hw_filter_vlan_ctrl         = hw_atl_b0_hw_vlan_ctrl,
	.hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
	.hw_set_offload              = hw_atl_b0_hw_offload_set,
	.hw_set_fc                   = hw_atl_b0_set_fc,
};
1143