linux/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
        .op64bit = false,                 \
        .msix_irqs = 8U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL_B0_RSS_MAX,        \
        .tcs_max = HW_ATL_B0_TC_MAX,      \
        .rxd_alignment = 1U,              \
        .rxd_size = HW_ATL_B0_RXD_SIZE,   \
        .rxds_max = HW_ATL_B0_MAX_RXD,    \
        .rxds_min = HW_ATL_B0_MIN_RXD,    \
        .txd_alignment = 1U,              \
        .txd_size = HW_ATL_B0_TXD_SIZE,   \
        .txds_max = HW_ATL_B0_MAX_TXD,    \
        .txds_min = HW_ATL_B0_MIN_TXD,    \
        .txhwb_alignment = 4096U,         \
        .tx_rings = HW_ATL_B0_TX_RINGS,   \
        .rx_rings = HW_ATL_B0_RX_RINGS,   \
        .hw_features = NETIF_F_HW_CSUM |  \
                        NETIF_F_RXCSUM |  \
                        NETIF_F_RXHASH |  \
                        NETIF_F_SG |      \
                        NETIF_F_TSO |     \
                        NETIF_F_TSO6 |    \
                        NETIF_F_LRO |     \
                        NETIF_F_NTUPLE |  \
                        NETIF_F_HW_VLAN_CTAG_FILTER | \
                        NETIF_F_HW_VLAN_CTAG_RX |     \
                        NETIF_F_HW_VLAN_CTAG_TX |     \
                        NETIF_F_GSO_UDP_L4      |     \
                        NETIF_F_GSO_PARTIAL |         \
                        NETIF_F_HW_TC,                \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true,             \
        .mtu = HW_ATL_B0_MTU_JUMBO,       \
        .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U

const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
        .quirks = AQ_NIC_QUIRK_BAD_PTP,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G  |
                          AQ_NIC_RATE_100M,
        .quirks = AQ_NIC_QUIRK_BAD_PTP,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;

        err = hw_atl_utils_soft_reset(self);
        if (err)
                return err;

        self->aq_fw_ops->set_state(self, MPI_RESET);

        err = aq_hw_err_from_flags(self);

        return err;
}

int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);

        return 0;
}

static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self)
{
        /* Init TC2 for PTP_TX */
        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
                                               AQ_HW_PTP_TC);

        /* Init TC2 for PTP_RX */
        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
                                               AQ_HW_PTP_TC);
        /* No flow control for PTP */
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
        u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
        unsigned int prio = 0U;
        u32 tc = 0U;

        if (cfg->is_ptp) {
                tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
                rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
        }

        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        tx_buff_size /= cfg->tcs;
        rx_buff_size /= cfg->tcs;
        for (tc = 0; tc < cfg->tcs; tc++) {
                u32 threshold = 0U;

                /* Tx buf size per TC */
                hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);

                threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
                hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);

                threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
                hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);

                /* QoS Rx buf size per TC */
                hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);

                threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
                hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);

                threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
                hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);

                hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
        }

        if (cfg->is_ptp)
                hw_atl_b0_tc_ptp_set(self);

        /* QoS 802.1p priority -> TC mapping */
        for (prio = 0; prio < 8; ++prio)
                hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
                                                        cfg->prio_tc_map[prio]);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
                              struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        unsigned int addr = 0U;
        unsigned int i = 0U;
        int err = 0;
        u32 val;

        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
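
/* Note on the key programming above: the 40-byte RSS secret key is written
 * as ten 32-bit words, from hash_secret_key[9] down to [0]. Each word is
 * byte-swapped with __swab32() first, which suggests the filter block
 * expects the key in the opposite byte order from the kernel's in-memory
 * layout; with RSS disabled the key registers are simply cleared.
 */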

static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        u8 *indirection_table = rss_params->indirection_table;
        u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
                   HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
        int err = 0;
        u32 i = 0U;
        u32 val;

        memset(bitary, 0, sizeof(bitary));

        for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                        ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
                                                self, val, val == 0,
                                                1000U, 10000U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
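
/* Packing scheme, for reference: the indirection table stores one 3-bit
 * queue index per RSS bucket, streamed into 16-bit register words. Bucket
 * i lands at bit offset (i * 3) % 16 of the 32-bit word that starts at
 * bitary[(i * 3) / 16], so an entry may straddle a word boundary: bucket 6
 * (illustrative) begins at bit 2 of bitary[1], since 6 * 3 = 18 = 16 + 2.
 */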

int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
                             struct aq_nic_cfg_s *aq_nic_cfg)
{
        u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM);
        unsigned int i;

        /* Tx checksum offloads */
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* Rx checksum offloads */
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum);
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum);

        /* LSO offloads */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        /* Outer VLAN tag offload */
        hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);

        /* LRO offloads */
        {
                unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
                        ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
                        ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

                for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
                        hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

                hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
                hw_atl_rpo_lro_inactive_interval_set(self, 0);
                /* The LRO time base divider is 5 us (0x61A), which is
                 * multiplied by 50 (0x32) to get the default maximum
                 * coalescing interval of 250 us.
                 */
                hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);

                hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

                hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

                hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);

                hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

                hw_atl_rpo_lro_pkt_lim_set(self, 1U);

                hw_atl_rpo_lro_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
                hw_atl_itr_rsc_en_set(self,
                                      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

                hw_atl_itr_rsc_delay_set(self, 1U);
        }

        return aq_hw_err_from_flags(self);
}
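
/* Interval arithmetic, for reference: with the time base divider 0x61A
 * (1562) yielding the 5 us tick quoted above, the divided clock would be
 * roughly 312 MHz (1562 / 5 us), and 50 ticks * 5 us give the 250 us
 * default maximum coalescing interval.
 */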

static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
{
        static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
        /* Scale factor is based on the number of bits in the fractional portion */
        static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
        static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
                                    HW_ATL_TPS_DESC_RATE_Y_SHIFT;
        const u32 link_speed = self->aq_link_status.mbps;
        struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
        unsigned long num_min_rated_tcs = 0;
        u32 tc_weight[AQ_CFG_TCS_MAX];
        u32 fixed_max_credit;
        u8 min_rate_msk = 0;
        u32 sum_weight = 0;
        int tc;

        /* By default max_credit is based upon the MTU (in units of 64 bytes) */
        fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;

        if (link_speed) {
                min_rate_msk = nic_cfg->tc_min_rate_msk &
                               (BIT(nic_cfg->tcs) - 1);
                num_min_rated_tcs = hweight8(min_rate_msk);
        }

        /* First, calculate weights where min_rate is specified */
        if (num_min_rated_tcs) {
                for (tc = 0; tc != nic_cfg->tcs; tc++) {
                        if (!nic_cfg->tc_min_rate[tc]) {
                                tc_weight[tc] = 0;
                                continue;
                        }

                        tc_weight[tc] = (-1L + link_speed +
                                         nic_cfg->tc_min_rate[tc] *
                                         max_weight) /
                                        link_speed;
                        tc_weight[tc] = min(tc_weight[tc], max_weight);
                        sum_weight += tc_weight[tc];
                }
        }

        /* WSP, if min_rate is set for at least one TC.
         * RR otherwise.
         *
         * NB! MAC FW sets the arb mode itself if PTP is enabled. We shouldn't
         * overwrite it here in that case.
         */
        if (!nic_cfg->is_ptp)
                hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);

        /* The Data TC Arbiter takes precedence over the Descriptor TC Arbiter,
         * so leave the Descriptor TC Arbiter as RR.
         */
        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);

        hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);

        for (tc = 0; tc != nic_cfg->tcs; tc++) {
                const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
                const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
                u32 weight, max_credit;

                hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
                                                              fixed_max_credit);
                hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);

                if (num_min_rated_tcs) {
                        weight = tc_weight[tc];

                        if (!weight && sum_weight < max_weight)
                                weight = (max_weight - sum_weight) /
                                         (nic_cfg->tcs - num_min_rated_tcs);
                        else if (!weight)
                                weight = 0x64;

                        max_credit = max(8 * weight, fixed_max_credit);
                } else {
                        weight = 0x64;
                        max_credit = 0xFFF;
                }

                hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
                hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
                                                              max_credit);

                hw_atl_tps_tx_desc_rate_en_set(self, desc, en);

                if (en) {
                        /* Nominal rate is always 10G */
                        const u32 rate = 10000U * scale /
                                         nic_cfg->tc_max_rate[tc];
                        const u32 rate_int = rate >>
                                             HW_ATL_TPS_DESC_RATE_Y_WIDTH;
                        const u32 rate_frac = rate & frac_msk;

                        hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
                        hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
                } else {
                        /* A value of 1 indicates the queue is not
                         * rate controlled.
                         */
                        hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
                        hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
                }
        }
        for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
                const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);

                hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
                hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
                hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
        }

        return aq_hw_err_from_flags(self);
}
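
/* Weight arithmetic, for reference: the tc_weight[] expression above is a
 * ceiling division, equivalent to DIV_ROUND_UP(tc_min_rate[tc] * max_weight,
 * link_speed). Assuming an illustrative 9-bit weight field (max_weight =
 * 511), a TC with a 2500 mbps floor on a 10000 mbps link gets
 * ceil(2500 * 511 / 10000) = 128, about a quarter of the arbiter weight.
 * The per-descriptor limiter is 10G-referenced fixed point: rate =
 * 10000 * scale / tc_max_rate, with the integer part in rate_x and the
 * fractional part in rate_y.
 */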

static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;

        /* Tx TC/Queue number config */
        hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);

        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
                        0x00010000U : 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}

void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        u32 rss_ctrl1 = HW_ATL_RSS_DISABLED;

        if (cfg->is_rss)
                rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ?
                            HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS :
                            HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS;

        hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1);
}

static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_b0_hw_init_rx_rss_ctrl1(self);

        /* Multicast filters */
        for (i = HW_ATL_B0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* VLAN filters */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

        hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

        /* Always accept untagged packets */
        hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
        hw_atl_rpf_vlan_untagged_act_set(self, 1U);

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ?
                        0x000F0000U : 0x00000000U);

        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
        unsigned int h = 0U;
        unsigned int l = 0U;
        int err = 0;

        if (!mac_addr) {
                err = -EINVAL;
                goto err_exit;
        }
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                (mac_addr[4] << 8) | mac_addr[5];

        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}
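
/* Packing, for reference: the unicast filter splits the MAC address into a
 * 16-bit MSW (bytes 0-1) and a 32-bit LSW (bytes 2-5). For an illustrative
 * address 00:17:b6:01:02:03 this gives h = 0x0017 and l = 0xb6010203. The
 * filter is disabled around the two register writes so that a half-updated
 * address never matches traffic.
 */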

static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
                [AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
                [AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
                [AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
        };
        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
        int err = 0;
        u32 val;

        hw_atl_b0_hw_init_tx_path(self);
        hw_atl_b0_hw_init_rx_path(self);

        hw_atl_b0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_b0_hw_qos_set(self);
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Force limit MRRS on RDM/TDM to 2K */
        val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
        aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
                        (val & ~0x707) | 0x404);

        /* TX DMA total request limit. B0 hardware is not capable of
         * handling more than (8K-MRRS) of incoming DMA data.
         * The value 24 is in 256-byte units.
         */
        aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        hw_atl_reg_irq_glb_ctl_set(self,
                                   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                                 [(aq_nic_cfg->vecs > 1U) ?
                                                 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Interrupts */
        hw_atl_reg_gen_irq_map_set(self,
                                   ((HW_ATL_B0_ERR_INT << 0x18) |
                                    (1U << 0x1F)) |
                                   ((HW_ATL_B0_ERR_INT << 0x10) |
                                    (1U << 0x17)), 0U);

        /* Enable link interrupt */
        if (aq_nic_cfg->link_irq_vec)
                hw_atl_reg_gen_irq_map_set(self, BIT(7) |
                                           aq_nic_cfg->link_irq_vec, 3U);

        hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}
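
/* MRRS note: the 0x707 mask above covers two 3-bit request-size fields in
 * PCI_REG_CONTROL6, and 0x404 programs both to 4. Assuming the usual PCIe
 * size encoding (128 << n bytes), 4 corresponds to the 2K limit named in
 * the comment, which also matches the (8K-MRRS) budget behind the request
 * limit of 24 * 256 = 6144 bytes.
 */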

int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

        return 0;
}

int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
                              unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int frag_count = 0U;
        unsigned int pkt_len = 0U;
        bool is_vlan = false;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                HW_ATL_B0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_gso_tcp || buff->is_gso_udp) {
                        if (buff->is_gso_tcp)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl |= (buff->len_l3 << 31) |
                                    (buff->len_l2 << 24);
                        txd->ctl2 |= (buff->mss << 16);
                        is_gso = true;

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                        txd->ctl2 |= (buff->len_l4 << 8) |
                                     (buff->len_l3 >> 1);
                }
                if (buff->is_vlan) {
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl |= buff->vlan_tx_tag << 4;
                        is_vlan = true;
                }
                if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
                                                ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;

                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso || is_vlan) {
                                /* enable tx context */
                                txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
                        }
                        if (is_gso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

                        if (is_vlan)
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
                                is_gso = false;
                                is_vlan = false;
                        }
                }
                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_b0_hw_tx_ring_tail_update(self, ring);

        return aq_hw_err_from_flags(self);
}
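
/* Descriptor packing, for reference: (u32)buff_pa_len << 4 places the
 * buffer length at bit 4 of ctl (masked by HW_ATL_B0_TXD_CTL_BLEN), and
 * pkt_len << 14 places the total payload length at bit 14 of ctl2 (masked
 * by HW_ATL_B0_TXD_CTL2_LEN). LSO and VLAN metadata travel in a leading
 * context descriptor (DESC_TYPE_TXC); the following data descriptors
 * reference that context via HW_ATL_B0_TXD_CTL2_CTX_EN.
 */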

int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
                              struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
        u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                                  aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self,
                                                  dma_desc_addr_msw, aq_ring->idx);

        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_rdm_rx_desc_data_buff_size_set(self,
                                              AQ_CFG_RX_FRAME_MAX / 1024U,
                                              aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
                                              aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
                              struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                                  aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                                  aq_ring->idx);

        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
                              unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
                sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                                        HW_ATL_B0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
                rxd->hdr_addr = 0U;
        }

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
                                          struct aq_ring_s *ring)
{
        unsigned int i;

        for (i = aq_ring_avail_dx(ring); i--;
                        ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)
                        &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];

                rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
                rxd->hdr_addr = 0U;
        }
        /* Make sure the descriptors are updated before bumping the tail */
        wmb();

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

        return aq_hw_err_from_flags(self);
}
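
/* Addressing note: dx_ring_pa + size * dx_size is the first byte past the
 * descriptor ring itself, so every HWTS descriptor apparently points the
 * hardware at scratch space placed directly behind the ring; only the
 * timestamp carried in the writeback is consumed, not packet data.
 */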

static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
                                             struct aq_ring_s *ring)
{
        while (ring->hw_head != ring->sw_tail) {
                struct hw_atl_rxd_hwts_wb_s *hwts_wb =
                        (struct hw_atl_rxd_hwts_wb_s *)
                        (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));

                /* RxD is not done */
                if (!(hwts_wb->sec_lw0 & 0x1U))
                        break;

                ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
        }

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        unsigned int hw_head_;
        int err = 0;

        hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
{
        for (; ring->hw_head != ring->sw_tail;
                ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;
                u8 rx_stat = 0U;

                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
                }

                buff = &ring->buff_ring[ring->hw_head];

                buff->flags = 0U;
                buff->is_hash_l4 = 0U;

                rx_stat = (0x0000003CU & rxd_wb->status) >> 2;

                is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;

                pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
                           HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;

                if (is_rx_check_sum_enabled & BIT(0) &&
                    (0x0U == (pkt_type & 0x3U)))
                        buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;

                if (is_rx_check_sum_enabled & BIT(1)) {
                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
                                                   !!(rx_stat & BIT(3));
                }
                buff->is_cso_err = !!(rx_stat & 0x6);
                /* Checksum offload workaround for small packets */
                if (unlikely(rxd_wb->pkt_len <= 60)) {
                        buff->is_ip_cso = 0U;
                        buff->is_cso_err = 0U;
                }

                if (self->aq_nic_cfg->is_vlan_rx_strip &&
                    ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
                     (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
                        buff->is_vlan = 1;
                        buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
                }

                if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
                        /* MAC error or DMA error */
                        buff->is_error = 1U;
                }
                if (self->aq_nic_cfg->is_rss) {
                        /* RSS type lives in the low 4 bits of the type field */
                        u16 rss_type = rxd_wb->type & 0xFU;

                        if (rss_type && rss_type < 0x8U) {
                                buff->is_hash_l4 = (rss_type == 0x4 ||
                                rss_type == 0x5);
                                buff->rss_hash = rxd_wb->rss_hash;
                        }
                }

                buff->is_lro = !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
                                  rxd_wb->status);
                if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                        buff->len = rxd_wb->pkt_len %
                                AQ_CFG_RX_FRAME_MAX;
                        buff->len = buff->len ?
                                buff->len : AQ_CFG_RX_FRAME_MAX;
                        buff->next = 0U;
                        buff->is_eop = 1U;
                } else {
                        buff->len =
                                rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
                                AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;

                        if (buff->is_lro) {
                                /* LRO */
                                buff->next = rxd_wb->next_desc_ptr;
                                ++ring->stats.rx.lro_packets;
                        } else {
                                /* jumbo */
                                buff->next =
                                        aq_ring_next_dx(ring,
                                                        ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}
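
/* Writeback decode, inferred from the masks above rather than a datasheet:
 * rx_stat holds status bits 2..5 (MAC error, IPv4 csum error, TCP/UDP csum
 * error, TCP/UDP csum valid); pkt_type bits 0..1 give the L3 protocol
 * (0 = IPv4) and bits 2..4 the L4 protocol (0 = TCP, 4 = UDP), which is
 * what the 0x3U and 0x1CU comparisons test.
 */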

int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        atomic_inc(&self->dpc);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = hw_atl_itr_irq_statuslsw_get(self);

        return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
                                   unsigned int packet_filter)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        unsigned int i = 0U;
        u32 vlan_promisc;
        u32 l2_promisc;

        l2_promisc = IS_FILTER_ENABLED(IFF_PROMISC) ||
                     !!(cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET));
        vlan_promisc = l2_promisc || cfg->is_vlan_force_promisc;

        hw_atl_rpfl2promiscuous_mode_en_set(self, l2_promisc);

        hw_atl_rpf_vlan_prom_mode_en_set(self, vlan_promisc);

        hw_atl_rpfl2multicast_flr_en_set(self,
                                         IS_FILTER_ENABLED(IFF_ALLMULTI) &&
                                         IS_FILTER_ENABLED(IFF_MULTICAST), 0);

        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                              IS_FILTER_ENABLED(IFF_ALLMULTI) &&
                                              IS_FILTER_ENABLED(IFF_MULTICAST));

        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (cfg->is_mc_list_enabled &&
                                            (i <= cfg->mc_list_count)) ?
                                           1U : 0U, i);

        return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
                                           [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
{
        int err = 0;
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;

        if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
        for (cfg->mc_list_count = 0U;
                        cfg->mc_list_count < count;
                        ++cfg->mc_list_count) {
                u32 i = cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
                                                        HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
                                                        HW_ATL_B0_MAC_MIN + i);

                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (cfg->is_mc_list_enabled),
                                           HW_ATL_B0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_tx = 2U;
        u32 itr_rx = 2U;

        switch (self->aq_nic_cfg->itr) {
        case AQ_CFG_INTERRUPT_MODERATION_ON:
        case AQ_CFG_INTERRUPT_MODERATION_AUTO:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

                if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
                        /* HW timers are in 2us units */
                        int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
                        int tx_min_timer = tx_max_timer / 2;

                        int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
                        int rx_min_timer = rx_max_timer / 2;

                        tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
                        tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
                        rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
                        rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

                        itr_tx |= tx_min_timer << 0x8U;
                        itr_tx |= tx_max_timer << 0x10U;
                        itr_rx |= rx_min_timer << 0x8U;
                        itr_rx |= rx_max_timer << 0x10U;
                } else {
                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
                                {0xfU, 0xffU},  /* 10Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit */
                                {0xfU, 0x1ffU}, /* 5Gbit 5GS */
                                {0xfU, 0x1ffU}, /* 2.5Gbit */
                                {0xfU, 0x1ffU}, /* 1Gbit */
                                {0xfU, 0x1ffU}, /* 100Mbit */
                        };

                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
                                {0x6U, 0x38U},  /* 10Gbit */
                                {0xCU, 0x70U},  /* 5Gbit */
                                {0xCU, 0x70U},  /* 5Gbit 5GS */
                                {0x18U, 0xE0U}, /* 2.5Gbit */
                                {0x30U, 0x80U}, /* 1Gbit */
                                {0x4U, 0x50U},  /* 100Mbit */
                        };

                        unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                        /* Update user visible ITR settings */
                        self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
                                                        [speed_index][1] * 2;
                        self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
                                                        [speed_index][1] * 2;

                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][0] << 0x8U;
                        itr_tx |= hw_atl_b0_timers_table_tx_
                                                [speed_index][1] << 0x10U;

                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][0] << 0x8U;
                        itr_rx |= hw_atl_b0_timers_table_rx_
                                                [speed_index][1] << 0x10U;
                }
                break;
        case AQ_CFG_INTERRUPT_MODERATION_OFF:
                hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
                hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
                hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
                itr_tx = 0U;
                itr_rx = 0U;
                break;
        }

        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
                hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
                hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
        }

        return aq_hw_err_from_flags(self);
}
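
/* Register packing, for reference: both moderation control registers take
 * the minimum timer at bits 8..15 and the maximum timer at bits 16..23
 * (the << 0x8U and << 0x10U shifts above), with timers counted in 2 us
 * units; bit 1 of the initial value 2U presumably acts as the per-ring
 * moderation enable.
 */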

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
        int err;
        u32 val;

        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);

        /* Invalidate the descriptor cache to prevent writing to cached
         * descriptors and to the data pointers of those descriptors.
         */
        hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);

        err = aq_hw_err_from_flags(self);

        if (err)
                goto err_exit;

        readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
                                  self, val, val == 1, 1000U, 10000U);

err_exit:
        return err;
}

int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);

        return aq_hw_err_from_flags(self);
}

int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);

        return aq_hw_err_from_flags(self);
}

#define get_ptp_ts_val_u64(self, indx) \
        ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))

static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
{
        u64 ns;

        hw_atl_pcs_ptp_clock_read_enable(self, 1);
        hw_atl_pcs_ptp_clock_read_enable(self, 0);
        ns = (get_ptp_ts_val_u64(self, 0) +
              (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
             (get_ptp_ts_val_u64(self, 3) +
              (get_ptp_ts_val_u64(self, 4) << 16));

        *stamp = ns + self->ptp_clk_offset;
}
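
/* Counter layout, for reference: toggling the read enable latches the PCS
 * PTP clock, which is then read back in 16-bit chunks; words 0 and 1 form
 * the seconds count and words 3 and 4 the nanoseconds count, hence the
 * (hi << 16) + lo pairs scaled by NSEC_PER_SEC above.
 */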

static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
{
        /* Scale the dividend up to preserve fractional precision */
        s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
        u64 nsi_frac = 0;
        u64 nsi;

        base_ns = div64_s64(base_ns, freq);
        nsi = div64_u64(base_ns, NSEC_PER_SEC);

        if (base_ns != nsi * NSEC_PER_SEC) {
                s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
                                        base_ns - nsi * NSEC_PER_SEC);
                nsi_frac = div64_s64(AQ_FRAC_PER_NS * NSEC_PER_SEC, divisor);
        }

        *ns = (u32)nsi;
        *fns = (u32)nsi_frac;
}
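
/* Worked example (illustrative figures): with freq = 156.25 MHz and
 * adj = 0, base_ns = 10^9 * 10^9 / freq = 6.4e9, so nsi = 6 whole
 * nanoseconds per tick and nsi_frac carries the remaining 0.4 ns in
 * AQ_FRAC_PER_NS units. A nonzero ppb adjustment stretches the
 * (adj + NSEC_PER_SEC) numerator before the same split.
 */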
1266
1267static void
1268hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
1269                             u64 phyfreq, u64 macfreq)
1270{
1271        s64 adj_fns_val;
1272        s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
1273                                        AQ_FRAC_PER_NS * ptp_adj_freq->ns_phy);
1274        s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
1275                                        AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac);
1276        s64 fault_in_sec_phy = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
1277        s64 fault_in_sec_mac = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
1278        /* MAC MCP counter freq is macfreq / 4 */
1279        s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
1280                                   4 * AQ_FRAC_PER_NS;
1281
1282        diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
1283                                         AQ_HW_MAC_COUNTER_HZ);
1284        adj_fns_val = (ptp_adj_freq->fns_mac + AQ_FRAC_PER_NS *
1285                       ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
1286
1287        ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, AQ_FRAC_PER_NS);
1288        ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
1289                                    AQ_FRAC_PER_NS;
1290}
1291
1292static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
1293{
1294        self->ptp_clk_offset += delta;
1295
1296        self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset);
1297
1298        return 0;
1299}
1300
1301static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
1302{
1303        s64 delta = time - (self->ptp_clk_offset + ts);
1304
1305        return hw_atl_b0_adj_sys_clock(self, delta);
1306}
1307
1308static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
1309{
1310        *time = self->ptp_clk_offset + ts;
1311        return 0;
1312}
1313
1314static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
1315{
1316        struct hw_fw_request_iface fwreq;
1317        size_t size;
1318
1319        memset(&fwreq, 0, sizeof(fwreq));
1320
1321        fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
1322        hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
1323                                 &fwreq.ptp_adj_freq.ns_mac,
1324                                 &fwreq.ptp_adj_freq.fns_mac);
1325        hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
1326                                 &fwreq.ptp_adj_freq.ns_phy,
1327                                 &fwreq.ptp_adj_freq.fns_phy);
1328        hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
1329                                     AQ_HW_PHY_COUNTER_HZ,
1330                                     AQ_HW_MAC_COUNTER_HZ);
1331
1332        size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
1333        return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
1334}
1335
1336static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
1337                                u64 start, u32 period)
1338{
1339        struct hw_fw_request_iface fwreq;
1340        size_t size;
1341
1342        memset(&fwreq, 0, sizeof(fwreq));
1343
1344        fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
1345        fwreq.ptp_gpio_ctrl.index = index;
1346        fwreq.ptp_gpio_ctrl.period = period;
1347        /* Apply time offset */
1348        fwreq.ptp_gpio_ctrl.start = start;
1349
1350        size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
1351        return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
1352}
1353
1354static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
1355                                       u32 enable)
1356{
1357        /* Enable/disable Sync1588 GPIO Timestamping */
1358        aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);
1359
1360        return 0;
1361}
1362
1363static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
1364{
1365        u64 sec_l;
1366        u64 sec_h;
1367        u64 nsec_l;
1368        u64 nsec_h;
1369
1370        if (!ts)
1371                return -EINVAL;
1372
1373        /* PTP external GPIO clock seconds count 15:0 */
1374        sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
1375        /* PTP external GPIO clock seconds count 31:16 */
1376        sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
1377        /* PTP external GPIO clock nanoseconds count 15:0 */
1378        nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
1379        /* PTP external GPIO clock nanoseconds count 31:16 */
1380        nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);
1381
1382        *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;
1383
1384        return 0;
1385}
1386
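/* Pull the hardware timestamp trailer out of a received PTP packet and
 * convert it to system time via ptp_clk_offset. The return value is the
 * number of trailing bytes the caller is expected to strip (0 if the
 * packet is too short to carry a trailer).
 */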
1387static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
1388                                   unsigned int len, u64 *timestamp)
1389{
1390        unsigned int offset = 14;
1391        struct ethhdr *eth;
1392        __be64 sec;
1393        __be32 ns;
1394        u8 *ptr;
1395
1396        if (len <= offset || !timestamp)
1397                return 0;
1398
1399        /* The TIMESTAMP trailer at the end of the packet has the
1400         * following format (big-endian):
1401         *   struct {
1402         *     uint64_t sec;
1403         *     uint32_t ns;
1404         *     uint16_t stream_id;
1405         *   };
1406         */
1407        ptr = p + (len - offset);
1408        memcpy(&sec, ptr, sizeof(sec));
1409        ptr += sizeof(sec);
1410        memcpy(&ns, ptr, sizeof(ns));
1411
1412        *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
1413                     be32_to_cpu(ns) + self->ptp_clk_offset;
1414
1415        eth = (struct ethhdr *)p;
1416
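        /* The full trailer is 14 bytes (sec:8 + ns:4 + stream_id:2); for
         * layer-2 PTP frames (ETH_P_1588) only 12 are reported, which
         * presumably leaves the stream_id in place for the caller.
         */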
1417        return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
1418}
1419
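/* The 48-bit seconds field of the hardware write-back descriptor is
 * scattered across three words and is reassembled below as:
 *   sec = lw0[11:2] | lw1[31:16] << 10 | hw[11:0] << 26 | hw[31:22] << 38
 */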
1420static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
1421                                  u64 *timestamp)
1422{
1423        struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
1424        u64 tmp, sec, ns;
1425
1426        sec = 0;
1427        tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
1428        sec += tmp;
1429        tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
1430        sec += tmp;
1431        tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
1432        sec += tmp;
1433        tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
1434        sec += tmp;
1435        ns = sec * NSEC_PER_SEC + hwts_wb->ns;
1436        if (timestamp)
1437                *timestamp = ns + self->ptp_clk_offset;
1438        return 0;
1439}
1440
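/* Clear an L3/L4 filter. An IPv6 rule spans HW_ATL_RX_CNT_REG_ADDR_IPV6
 * consecutive filter locations (apparently one per 32-bit address word),
 * so every location in that span is cleared.
 */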
1441static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
1442                                    struct aq_rx_filter_l3l4 *data)
1443{
1444        u8 location = data->location;
1445
1446        if (!data->is_ipv6) {
1447                hw_atl_rpfl3l4_cmd_clear(self, location);
1448                hw_atl_rpf_l4_spd_set(self, 0U, location);
1449                hw_atl_rpf_l4_dpd_set(self, 0U, location);
1450                hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
1451                hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
1452        } else {
1453                int i;
1454
1455                for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
1456                        hw_atl_rpfl3l4_cmd_clear(self, location + i);
1457                        hw_atl_rpf_l4_spd_set(self, 0U, location + i);
1458                        hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
1459                }
1460                hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
1461                hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
1462        }
1463
1464        return aq_hw_err_from_flags(self);
1465}
1466
1467static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
1468                                  struct aq_rx_filter_l3l4 *data)
1469{
1470        u8 location = data->location;
1471
1472        hw_atl_b0_hw_fl3l4_clear(self, data);
1473
1474        if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
1475                         HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
1476                if (!data->is_ipv6) {
1477                        hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
1478                                                          location,
1479                                                          data->ip_dst[0]);
1480                        hw_atl_rpfl3l4_ipv4_src_addr_set(self,
1481                                                         location,
1482                                                         data->ip_src[0]);
1483                } else {
1484                        hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
1485                                                          location,
1486                                                          data->ip_dst);
1487                        hw_atl_rpfl3l4_ipv6_src_addr_set(self,
1488                                                         location,
1489                                                         data->ip_src);
1490                }
1491        }
1492
1493        if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
1494                         HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
1495                hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
1496                hw_atl_rpf_l4_spd_set(self, data->p_src, location);
1497        }
1498
1499        hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
1500
1501        return aq_hw_err_from_flags(self);
1502}
1503
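/* Install an EtherType (L2) filter, optionally qualified by the 802.1p
 * user priority. A negative queue selects filter action 0, which
 * apparently means "discard"; otherwise action 1 accepts the frame and
 * steers it to the given RX queue.
 */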
1504static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
1505                                struct aq_rx_filter_l2 *data)
1506{
1507        hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
1508        hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
1509        hw_atl_rpf_etht_user_priority_en_set(self,
1510                                             !!data->user_priority_en,
1511                                             data->location);
1512        if (data->user_priority_en)
1513                hw_atl_rpf_etht_user_priority_set(self,
1514                                                  data->user_priority,
1515                                                  data->location);
1516
1517        if (data->queue < 0) {
1518                hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
1519                hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
1520        } else {
1521                hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
1522                hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
1523                hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
1524        }
1525
1526        return aq_hw_err_from_flags(self);
1527}
1528
1529static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
1530                                  struct aq_rx_filter_l2 *data)
1531{
1532        hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
1533        hw_atl_rpf_etht_flr_set(self, 0U, data->location);
1534        hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
1535
1536        return aq_hw_err_from_flags(self);
1537}
1538
1539/**
1540 * hw_atl_b0_hw_vlan_set() - Set the VLAN filter table
1541 * @self: hardware context
1542 * @aq_vlans: VLAN filter configuration, one entry per filter location
1543 *
1544 * Configure the VLAN filter table to accept (and steer to a queue) traffic
1545 * for the given VLAN IDs. Call this with VLAN promiscuous mode enabled so
1546 * that traffic is not lost while the filters are being rewritten.
1547 * Return: 0 on success, <0 on error
1548 */
1549static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
1550                                 struct aq_rx_filter_vlan *aq_vlans)
1551{
1552        int i;
1553
1554        for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
1555                hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
1556                hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
1557                if (aq_vlans[i].enable) {
1558                        hw_atl_rpf_vlan_id_flr_set(self,
1559                                                   aq_vlans[i].vlan_id,
1560                                                   i);
1561                        hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
1562                        hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
1563                        if (aq_vlans[i].queue != 0xFF) {
1564                                hw_atl_rpf_vlan_rxq_flr_set(self,
1565                                                            aq_vlans[i].queue,
1566                                                            i);
1567                                hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
1568                        }
1569                }
1570        }
1571
1572        return aq_hw_err_from_flags(self);
1573}
1574
1575static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
1576{
1577        /* Enable VLAN promiscuous mode when disabling the VLAN filter */
1578        hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
1579
1580        return aq_hw_err_from_flags(self);
1581}
1582
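/* Judging by the register names: DMA_SYS loops traffic inside the DMA
 * block, PKT_SYS between the TX and RX packet offload engines, and
 * DMA_NET at the network interface; the last also forces promiscuous
 * modes and disables TX clock gating so looped frames are received.
 */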
1583int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
1584{
1585        switch (mode) {
1586        case AQ_HW_LOOPBACK_DMA_SYS:
1587                hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
1588                hw_atl_rpb_dma_sys_lbk_set(self, enable);
1589                break;
1590        case AQ_HW_LOOPBACK_PKT_SYS:
1591                hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
1592                hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
1593                break;
1594        case AQ_HW_LOOPBACK_DMA_NET:
1595                hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
1596                hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
1597                hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
1598                hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
1599                hw_atl_rpb_dma_net_lbk_set(self, enable);
1600                break;
1601        default:
1602                return -EINVAL;
1603        }
1604
1605        return 0;
1606}
1607
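/* Poll helper for readx_poll_timeout(): nonzero only once the temperature
 * sensor reports both "ready" and "ready latch high".
 */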
1608static u32 hw_atl_b0_ts_ready_and_latch_high_get(struct aq_hw_s *self)
1609{
1610        if (hw_atl_ts_ready_get(self) && hw_atl_ts_ready_latch_high_get(self))
1611                return 1;
1612
1613        return 0;
1614}
1615
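/* Read the MAC die temperature from the AFE sensor, powering the sensor
 * up for the measurement if it was down and restoring its previous state
 * afterwards.
 */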
1616static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
1617{
1618        bool ts_disabled;
1619        int err;
1620        u32 val;
1621        u32 ts;
1622
1623        ts_disabled = (hw_atl_ts_power_down_get(self) == 1U);
1624
1625        if (ts_disabled) {
1626                /* Set the AFE temperature sensor to on (off by default) */
1627                hw_atl_ts_power_down_set(self, 0U);
1628
1629                /* Reset internal capacitors, biasing, and counters */
1630                hw_atl_ts_reset_set(self, 1);
1631                hw_atl_ts_reset_set(self, 0);
1632        }
1633
1634        err = readx_poll_timeout(hw_atl_b0_ts_ready_and_latch_high_get, self,
1635                                 val, val == 1, 10000U, 500000U);
1636        if (err)
1637                return err;
1638
1639        ts = hw_atl_ts_data_get(self);
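        /* Quadratic fit from the raw sensor code to temperature
         * (presumably millidegrees Celsius, as expected by hwmon):
         * 16 * ts^2 / 100000 + 60 * ts - 83410
         */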
1640        *temp = ts * ts * 16 / 100000 + 60 * ts - 83410;
1641
1642        if (ts_disabled) {
1643                /* Set the AFE temperature sensor back to off */
1644                hw_atl_ts_power_down_set(self, 1U);
1645        }
1646
1647        return 0;
1648}
1649
1650const struct aq_hw_ops hw_atl_ops_b0 = {
1651        .hw_soft_reset        = hw_atl_utils_soft_reset,
1652        .hw_prepare           = hw_atl_utils_initfw,
1653        .hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
1654        .hw_init              = hw_atl_b0_hw_init,
1655        .hw_reset             = hw_atl_b0_hw_reset,
1656        .hw_start             = hw_atl_b0_hw_start,
1657        .hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
1658        .hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
1659        .hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
1660        .hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
1661        .hw_stop              = hw_atl_b0_hw_stop,
1662
1663        .hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
1664        .hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,
1665
1666        .hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
1667        .hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,
1668
1669        .hw_irq_enable           = hw_atl_b0_hw_irq_enable,
1670        .hw_irq_disable          = hw_atl_b0_hw_irq_disable,
1671        .hw_irq_read             = hw_atl_b0_hw_irq_read,
1672
1673        .hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
1674        .hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
1675        .hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
1676        .hw_filter_l2_set            = hw_atl_b0_hw_fl2_set,
1677        .hw_filter_l2_clear          = hw_atl_b0_hw_fl2_clear,
1678        .hw_filter_l3l4_set          = hw_atl_b0_hw_fl3l4_set,
1679        .hw_filter_vlan_set          = hw_atl_b0_hw_vlan_set,
1680        .hw_filter_vlan_ctrl         = hw_atl_b0_hw_vlan_ctrl,
1681        .hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
1682        .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
1683        .hw_rss_set                  = hw_atl_b0_hw_rss_set,
1684        .hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
1685        .hw_tc_rate_limit_set        = hw_atl_b0_hw_init_tx_tc_rate_limit,
1686        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
1687        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
1688        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
1689
1690        .hw_ring_hwts_rx_fill        = hw_atl_b0_hw_ring_hwts_rx_fill,
1691        .hw_ring_hwts_rx_receive     = hw_atl_b0_hw_ring_hwts_rx_receive,
1692
1693        .hw_get_ptp_ts           = hw_atl_b0_get_ptp_ts,
1694        .hw_adj_sys_clock        = hw_atl_b0_adj_sys_clock,
1695        .hw_set_sys_clock        = hw_atl_b0_set_sys_clock,
1696        .hw_ts_to_sys_clock      = hw_atl_b0_ts_to_sys_clock,
1697        .hw_adj_clock_freq       = hw_atl_b0_adj_clock_freq,
1698        .hw_gpio_pulse           = hw_atl_b0_gpio_pulse,
1699        .hw_extts_gpio_enable    = hw_atl_b0_extts_gpio_enable,
1700        .hw_get_sync_ts          = hw_atl_b0_get_sync_ts,
1701        .rx_extract_ts           = hw_atl_b0_rx_extract_ts,
1702        .extract_hwts            = hw_atl_b0_extract_hwts,
1703        .hw_set_offload          = hw_atl_b0_hw_offload_set,
1704        .hw_set_loopback         = hw_atl_b0_set_loopback,
1705        .hw_set_fc               = hw_atl_b0_set_fc,
1706
1707        .hw_get_mac_temp         = hw_atl_b0_get_mac_temp,
1708};
1709