linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME                      "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

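/* HCLGE_STATS_READ pulls a u64 counter out of a stats block at a given
 * byte offset; the *_FIELD_OFF helpers above generate those offsets for
 * the {name, offset} ethtool tables defined below.
 */
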
static int hclge_rss_init_hw(struct hclge_dev *hdev);
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
                                     enum hclge_mta_dmac_sel_type mta_mac_sel,
                                     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

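/* The order of these strings must match the HNAE3_MAC_INTER_LOOP_MAC/
 * _SERDES/_PHY indices used by hclge_get_strings() when reporting the
 * supported self-tests.
 */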
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "Mac    Loopback test",
        "Serdes Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
        {"igu_rx_oversize_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
        {"igu_rx_undersize_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
        {"igu_rx_out_all_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
        {"igu_rx_uni_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
        {"igu_rx_multi_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
        {"igu_rx_broad_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
        {"egu_tx_out_all_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
        {"egu_tx_uni_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
        {"egu_tx_multi_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
        {"egu_tx_broad_pkt",
                HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
        {"ssu_ppp_mac_key_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
        {"ssu_ppp_host_key_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
        {"ppp_ssu_mac_rlt_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
        {"ppp_ssu_host_rlt_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
        {"ssu_tx_in_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
        {"ssu_tx_out_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
        {"ssu_rx_in_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
        {"ssu_rx_out_num",
                HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
        {"igu_rx_err_pkt",
                HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
        {"igu_rx_no_eof_pkt",
                HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
        {"igu_rx_no_sof_pkt",
                HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
        {"egu_tx_1588_pkt",
                HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
        {"ssu_full_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
        {"ssu_part_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
        {"ppp_key_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
        {"ppp_rlt_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
        {"ssu_key_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
        {"pkt_curr_buf_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
        {"qcn_fb_rcv_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
        {"qcn_fb_drop_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
        {"qcn_fb_invaild_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
        {"rx_packet_tc0_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
        {"rx_packet_tc1_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
        {"rx_packet_tc2_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
        {"rx_packet_tc3_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
        {"rx_packet_tc4_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
        {"rx_packet_tc5_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
        {"rx_packet_tc6_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
        {"rx_packet_tc7_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
        {"rx_packet_tc0_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
        {"rx_packet_tc1_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
        {"rx_packet_tc2_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
        {"rx_packet_tc3_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
        {"rx_packet_tc4_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
        {"rx_packet_tc5_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
        {"rx_packet_tc6_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
        {"rx_packet_tc7_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
        {"tx_packet_tc0_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
        {"tx_packet_tc1_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
        {"tx_packet_tc2_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
        {"tx_packet_tc3_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
        {"tx_packet_tc4_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
        {"tx_packet_tc5_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
        {"tx_packet_tc6_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
        {"tx_packet_tc7_in_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
        {"tx_packet_tc0_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
        {"tx_packet_tc1_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
        {"tx_packet_tc2_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
        {"tx_packet_tc3_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
        {"tx_packet_tc4_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
        {"tx_packet_tc5_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
        {"tx_packet_tc6_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
        {"tx_packet_tc7_out_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
        {"pkt_curr_buf_tc0_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
        {"pkt_curr_buf_tc1_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
        {"pkt_curr_buf_tc2_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
        {"pkt_curr_buf_tc3_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
        {"pkt_curr_buf_tc4_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
        {"pkt_curr_buf_tc5_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
        {"pkt_curr_buf_tc6_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
        {"pkt_curr_buf_tc7_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
        {"mb_uncopy_num",
                HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
        {"lo_pri_unicast_rlt_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
        {"hi_pri_multicast_rlt_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
        {"lo_pri_multicast_rlt_drop_num",
                HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
        {"rx_oq_drop_pkt_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
        {"tx_oq_drop_pkt_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
        {"nic_l2_err_drop_pkt_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
        {"roc_l2_err_drop_pkt_cnt",
                HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_overrsize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_max_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_overrsize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_max_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

        {"mac_trans_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
        {"mac_trans_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
        {"mac_trans_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
        {"mac_trans_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
        {"mac_trans_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
        {"mac_trans_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
        {"mac_rcv_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
        {"mac_rcv_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
        {"mac_rcv_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
        {"mac_rcv_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
        {"mac_rcv_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
        {"mac_rcv_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};

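/* The 64-bit counters are fetched with one chained command. The first
 * descriptor contributes only its data area (one datum fewer), while each
 * follow-up descriptor is consumed whole, i.e. its header bytes also carry
 * counter payload here.
 */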
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
        u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
        struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
        u64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get 64 bit pkt stats fail, status = %d.\n", ret);
                return ret;
        }

        for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
                if (unlikely(i == 0)) {
                        desc_data = (u64 *)(&desc[i].data[0]);
                        n = HCLGE_64_BIT_RTN_DATANUM - 1;
                } else {
                        desc_data = (u64 *)(&desc[i]);
                        n = HCLGE_64_BIT_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *data++ += le64_to_cpu(*desc_data);
                        desc_data++;
                }
        }

        return 0;
}

static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
        stats->pkt_curr_buf_cnt     = 0;
        stats->pkt_curr_buf_tc0_cnt = 0;
        stats->pkt_curr_buf_tc1_cnt = 0;
        stats->pkt_curr_buf_tc2_cnt = 0;
        stats->pkt_curr_buf_tc3_cnt = 0;
        stats->pkt_curr_buf_tc4_cnt = 0;
        stats->pkt_curr_buf_tc5_cnt = 0;
        stats->pkt_curr_buf_tc6_cnt = 0;
        stats->pkt_curr_buf_tc7_cnt = 0;
}

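/* Most 32-bit counters accumulate across reads, but the pkt_curr_buf_*
 * fields are (judging by their names) snapshots of current buffer
 * occupancy, so hclge_reset_partial_32bit_counter() above zeroes them
 * before each refresh instead of letting them grow without bound.
 */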
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

        struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
        struct hclge_32_bit_stats *all_32_bit_stats;
        u32 *desc_data;
        int i, k, n;
        u64 *data;
        int ret;

        all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
        data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get 32 bit pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        hclge_reset_partial_32bit_counter(all_32_bit_stats);
        for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
                if (unlikely(i == 0)) {
                        all_32_bit_stats->igu_rx_err_pkt +=
                                le32_to_cpu(desc[i].data[0]);
                        all_32_bit_stats->igu_rx_no_eof_pkt +=
                                le32_to_cpu(desc[i].data[1]) & 0xffff;
                        all_32_bit_stats->igu_rx_no_sof_pkt +=
                                (le32_to_cpu(desc[i].data[1]) >> 16) & 0xffff;

                        desc_data = (u32 *)(&desc[i].data[2]);
                        n = HCLGE_32_BIT_RTN_DATANUM - 4;
                } else {
                        desc_data = (u32 *)(&desc[i]);
                        n = HCLGE_32_BIT_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *data++ += le32_to_cpu(*desc_data);
                        desc_data++;
                }
        }

        return 0;
}

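/* MAC counters use the same accumulation scheme over 17 chained
 * descriptors; the first descriptor carries two fewer data words than the
 * rest.
 */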
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        u64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                if (unlikely(i == 0)) {
                        desc_data = (u64 *)(&desc[i].data[0]);
                        n = HCLGE_RTN_DATA_NUM - 2;
                } else {
                        desc_data = (u64 *)(&desc[i]);
                        n = HCLGE_RTN_DATA_NUM;
                }
                for (k = 0; k < n; k++) {
                        *data++ += le64_to_cpu(*desc_data);
                        desc_data++;
                }
        }

        return 0;
}

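/* Per-queue counters are read one queue at a time: the queue index goes
 * into the low 9 bits of data[0] and the packet count comes back in
 * data[4] of the completed descriptor.
 */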
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_RX_STATUS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_RX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index & 0x1ff);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[4]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_TX_STATUS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index & 0x1ff);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[4]);
        }

        return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

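/* Generic walkers over the {name, offset} tables above: one copies counter
 * values into the ethtool u64 buffer, the other emits the corresponding
 * ETH_GSTRING_LEN-sized name strings.
 */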
static u64 *hclge_comm_get_stats(void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

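/* Fold the hardware counters into struct net_device_stats. Note that
 * rx_errors and rx_length_errors both aggregate several MAC/IGU error
 * counters, so a single bad frame can be reflected in more than one
 * netstat field.
 */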
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
                                 struct net_device_stats *net_stats)
{
        net_stats->tx_dropped = 0;
        net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
        net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
        net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

        net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
        net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
        net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
        net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
        net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
        net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

        net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
        net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

        net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
        net_stats->rx_length_errors =
                hw_stats->mac_stats.mac_rx_undersize_pkt_num;
        net_stats->rx_length_errors +=
                hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
        net_stats->rx_over_errors =
                hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);

        status = hclge_32_bit_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update 32 bit stats fail, status = %d.\n",
                        status);

        hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
        int status;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_32_bit_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update 32 bit stats fail, status = %d.\n",
                        status);

        status = hclge_64_bit_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update 64 bit stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        hclge_update_netstat(hw_stats, net_stats);
}

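/* ethtool string-set sizing. For ETH_SS_TEST the count doubles as a
 * capability probe: supported loopback modes are latched into
 * handle->flags here and consumed by hclge_get_strings() below.
 */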
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: supported only in GE mode
         * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
         * phy: supported only when a PHY device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags &= ~HCLGE_LOOPBACK_TEST_FLAGS;
                if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
                } else {
                        count = -EOPNOTSUPP;
                }
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        ARRAY_SIZE(g_all_32bit_stats_string) +
                        ARRAY_SIZE(g_all_64bit_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
                              u32 stringset,
                              u8 *data)
{
        u8 *p = data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset,
                                           g_mac_stats_string,
                                           size,
                                           p);
                size = ARRAY_SIZE(g_all_32bit_stats_string);
                p = hclge_comm_get_strings(stringset,
                                           g_all_32bit_stats_string,
                                           size,
                                           p);
                size = ARRAY_SIZE(g_all_64bit_stats_string);
                p = hclge_comm_get_strings(stringset,
                                           g_all_64bit_stats_string,
                                           size,
                                           p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
                                 g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string),
                                 data);
        p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
                                 g_all_32bit_stats_string,
                                 ARRAY_SIZE(g_all_32bit_stats_string),
                                 p);
        p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
                                 g_all_64bit_stats_string,
                                 ARRAY_SIZE(g_all_64bit_stats_string),
                                 p);
        p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status *status)
{
        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        hdev->num_req_vfs = status->vf_num / status->pf_num;
        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
        struct hclge_func_status *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n",
                                ret);

                        return ret;
                }

                /* Check if PF reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < 5);

        ret = hclge_parse_func_status(hdev, req);

        return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res *)desc.data;
        hdev->num_tqps = __le16_to_cpu(req->tqp_num);
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->num_roce_msix =
                hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                               HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                /* PF should have both NIC and RoCE vectors;
                 * NIC vectors are queued before RoCE vectors.
                 */
                hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
        } else {
                hdev->num_msi =
                hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                               HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
        }

        return 0;
}

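/* Firmware speed codes are not monotonic: 0..5 map to 1G through 100G,
 * while 10M and 100M are carried as codes 6 and 7.
 */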
static int hclge_parse_speed(int speed_cmd, int *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

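/* Decode the raw config words. The 48-bit MAC address is split across
 * param[2] (low 32 bits) and a 16-bit field in param[3]; the high half is
 * shifted up in two steps ((x << 31) << 1), which is just a spelled-out
 * 32-bit shift.
 */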
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
        struct hclge_cfg_param *req;
        u64 mac_addr_tmp_high;
        u64 mac_addr_tmp;
        int i;

        req = (struct hclge_cfg_param *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
                                             HCLGE_CFG_VMDQ_M,
                                             HCLGE_CFG_VMDQ_S);
        cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
                                     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
                                           HCLGE_CFG_TQP_DESC_N_M,
                                           HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
                                       HCLGE_CFG_PHY_ADDR_M,
                                       HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_MEDIA_TP_M,
                                         HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_RX_BUF_LEN_M,
                                         HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
                                           HCLGE_CFG_MAC_ADDR_H_M,
                                           HCLGE_CFG_MAC_ADDR_H_S);

        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_DEFAULT_SPEED_M,
                                            HCLGE_CFG_DEFAULT_SPEED_S);
        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        req = (struct hclge_cfg_param *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}

/* hclge_get_cfg: query static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
        struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
        struct hclge_cfg_param *req;
        int i, ret;

        for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
                req = (struct hclge_cfg_param *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                           true);
                hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M,
                               HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
                /* Length must be in units of 4 bytes when sent to hardware */
                hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M,
                               HCLGE_CFG_RD_LEN_S,
                               HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
                req->offset = cpu_to_le32(req->offset);
        }

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "get config failed %d.\n", ret);
                return ret;
        }

        hclge_parse_cfg(hcfg, desc);
        return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_query_function_status(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query function status error %d.\n", ret);
                return ret;
        }

        /* get pf resource */
        ret = hclge_query_pf_resource(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource error %d.\n", ret);
                return ret;
        }

        return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
        struct hclge_cfg cfg;
        int ret, i;

        ret = hclge_get_cfg(hdev, &cfg);
        if (ret) {
                dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
                return ret;
        }

        hdev->num_vmdq_vport = cfg.vmdq_vport_num;
        hdev->base_tqp_pid = 0;
        hdev->rss_size_max = 1;
        hdev->rx_buf_len = cfg.rx_buf_len;
        ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
        hdev->hw.mac.media_type = cfg.media_type;
        hdev->hw.mac.phy_addr = cfg.phy_addr;
        hdev->num_desc = cfg.tqp_desc_num;
        hdev->tm_info.num_pg = 1;
        hdev->tm_info.num_tc = cfg.tc_num;
        hdev->tm_info.hw_pfc_map = 0;

        ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
        if (ret) {
                dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
                return ret;
        }

        if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) ||
            (hdev->tm_info.num_tc < 1)) {
                dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
                         hdev->tm_info.num_tc);
                hdev->tm_info.num_tc = 1;
        }

        /* Currently we do not support discontiguous TCs */
        for (i = 0; i < cfg.tc_num; i++)
                hnae_set_bit(hdev->hw_tc_map, i, 1);

        if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
                hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
        else
                hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;

        return ret;
}

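/* Program the TSO MSS clamp. Note both the min and the max field are
 * written with the HCLGE_TSO_MSS_MIN mask/shift; presumably the max field
 * shares the same in-word layout.
 */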
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
                            int tso_mss_max)
{
        struct hclge_cfg_tso_status *req;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

        req = (struct hclge_cfg_tso_status *)desc.data;
        hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M,
                       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
        hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M,
                       HCLGE_TSO_MSS_MIN_S, tso_mss_max);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
        struct hclge_tqp *tqp;
        int i;

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclge_tqp), GFP_KERNEL);
        if (!hdev->htqp)
                return -ENOMEM;

        tqp = hdev->htqp;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;
                tqp->index = i;

                tqp->q.ae_algo = &ae_algo;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.desc_num = hdev->num_desc;
                tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
                        i * HCLGE_TQP_REG_SIZE;

                tqp++;
        }

        return 0;
}

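/* Bind one physical TQP to a function: tqp_id is the device-global queue
 * index, tqp_vid is the queue index as seen inside the function, and
 * tqp_flag carries the PF/VF type bit plus the map-enable bit.
 */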
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
                                  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
        struct hclge_tqp_map *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

        req = (struct hclge_tqp_map *)desc.data;
        req->tqp_id = cpu_to_le16(tqp_pid);
        req->tqp_vf = cpu_to_le16(func_id);
        req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
                        1 << HCLGE_TQP_MAP_EN_B;
        req->tqp_vid = cpu_to_le16(tqp_vid);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
                        ret);
                return ret;
        }

        return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
                            struct hnae3_queue **tqp, u16 num_tqps)
{
        struct hclge_dev *hdev = vport->back;
        int i, alloced, func_id, ret;
        bool is_pf;

        func_id = vport->vport_id;
        is_pf = (vport->vport_id == 0);

        for (i = 0, alloced = 0; i < hdev->num_tqps &&
             alloced < num_tqps; i++) {
                if (!hdev->htqp[i].alloced) {
                        hdev->htqp[i].q.handle = &vport->nic;
                        hdev->htqp[i].q.tqp_index = alloced;
                        tqp[alloced] = &hdev->htqp[i].q;
                        hdev->htqp[i].alloced = true;
                        ret = hclge_map_tqps_to_func(hdev, func_id,
                                                     hdev->htqp[i].index,
                                                     alloced, is_pf);
                        if (ret)
                                return ret;

                        alloced++;
                }
        }
        vport->alloc_tqps = num_tqps;

        return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
        struct hnae3_handle *nic = &vport->nic;
        struct hnae3_knic_private_info *kinfo = &nic->kinfo;
        struct hclge_dev *hdev = vport->back;
        int i, ret;

        kinfo->num_desc = hdev->num_desc;
        kinfo->rx_buf_len = hdev->rx_buf_len;
        kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
        kinfo->rss_size
                = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
        kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i)) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
                                  sizeof(struct hnae3_queue *), GFP_KERNEL);
        if (!kinfo->tqp)
                return -ENOMEM;

        ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
        if (ret) {
                dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
                return -EINVAL;
        }

        return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
        /* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
        struct hnae3_handle *nic = &vport->nic;
        struct hclge_dev *hdev = vport->back;
        int ret;

        nic->pdev = hdev->pdev;
        nic->ae_algo = &ae_algo;
        nic->numa_node_mask = hdev->numa_node_mask;

        if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
                ret = hclge_knic_setup(vport, num_tqps);
                if (ret) {
                        dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
                                ret);
                        return ret;
                }
        } else {
                hclge_unic_setup(vport, num_tqps);
        }

        return 0;
}

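/* Vport layout: vport 0 is the PF's main NIC, followed by vports for VMDq
 * and the requested VFs. TQPs are split evenly across vports, with any
 * remainder going to the main vport.
 */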
1263static int hclge_alloc_vport(struct hclge_dev *hdev)
1264{
1265        struct pci_dev *pdev = hdev->pdev;
1266        struct hclge_vport *vport;
1267        u32 tqp_main_vport;
1268        u32 tqp_per_vport;
1269        int num_vport, i;
1270        int ret;
1271
1272        /* We need to alloc a vport for main NIC of PF */
1273        num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1274
1275        if (hdev->num_tqps < num_vport)
1276                num_vport = hdev->num_tqps;
1277
1278        /* Alloc the same number of TQPs for every vport */
1279        tqp_per_vport = hdev->num_tqps / num_vport;
1280        tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1281
1282        vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1283                             GFP_KERNEL);
1284        if (!vport)
1285                return -ENOMEM;
1286
1287        hdev->vport = vport;
1288        hdev->num_alloc_vport = num_vport;
1289
1290#ifdef CONFIG_PCI_IOV
1291        /* Enable SRIOV */
1292        if (hdev->num_req_vfs) {
1293                dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
1294                         hdev->num_req_vfs);
1295                ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
1296                if (ret) {
1297                        hdev->num_alloc_vfs = 0;
1298                        dev_err(&pdev->dev, "SRIOV enable failed %d\n",
1299                                ret);
1300                        return ret;
1301                }
1302        }
1303        hdev->num_alloc_vfs = hdev->num_req_vfs;
1304#endif
1305
1306        for (i = 0; i < num_vport; i++) {
1307                vport->back = hdev;
1308                vport->vport_id = i;
1309
1310                if (i == 0)
1311                        ret = hclge_vport_setup(vport, tqp_main_vport);
1312                else
1313                        ret = hclge_vport_setup(vport, tqp_per_vport);
1314                if (ret) {
1315                        dev_err(&pdev->dev,
1316                                "vport setup failed for vport %d, %d\n",
1317                                i, ret);
1318                        return ret;
1319                }
1320
1321                vport++;
1322        }
1323
1324        return 0;
1325}
1326
1327static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size)
1328{
1329/* TX buffer size is unit by 128 byte */
1330#define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1331#define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1332        struct hclge_tx_buff_alloc *req;
1333        struct hclge_desc desc;
1334        int ret;
1335        u8 i;
1336
1337        req = (struct hclge_tx_buff_alloc *)desc.data;
1338
1339        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
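            /* Every TC is given the same Tx buffer. Illustrative numbers:
             * buf_size = 9216 bytes is programmed as 9216 >> 7 = 72
             * 128-byte units, with bit 15 set so hardware applies the new
             * value.
             */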
1340        for (i = 0; i < HCLGE_TC_NUM; i++)
1341                req->tx_pkt_buff[i] =
1342                        cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1343                                     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1344
1345        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1346        if (ret) {
1347                dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1348                        ret);
1349                return ret;
1350        }
1351
1352        return 0;
1353}
1354
1355static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size)
1356{
1357        int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size);
1358
1359        if (ret) {
1360                dev_err(&hdev->pdev->dev,
1361                        "tx buffer alloc failed %d\n", ret);
1362                return ret;
1363        }
1364
1365        return 0;
1366}
1367
1368static int hclge_get_tc_num(struct hclge_dev *hdev)
1369{
1370        int i, cnt = 0;
1371
1372        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1373                if (hdev->hw_tc_map & BIT(i))
1374                        cnt++;
1375        return cnt;
1376}
1377
1378static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1379{
1380        int i, cnt = 0;
1381
1382        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1383                if (hdev->hw_tc_map & BIT(i) &&
1384                    hdev->tm_info.hw_pfc_map & BIT(i))
1385                        cnt++;
1386        return cnt;
1387}
1388
1389/* Get the number of PFC-enabled TCs that have a private buffer */
1390static int hclge_get_pfc_priv_num(struct hclge_dev *hdev)
1391{
1392        struct hclge_priv_buf *priv;
1393        int i, cnt = 0;
1394
1395        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1396                priv = &hdev->priv_buf[i];
1397                if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1398                    priv->enable)
1399                        cnt++;
1400        }
1401
1402        return cnt;
1403}
1404
1405/* Get the number of PFC-disabled TCs that have a private buffer */
1406static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev)
1407{
1408        struct hclge_priv_buf *priv;
1409        int i, cnt = 0;
1410
1411        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1412                priv = &hdev->priv_buf[i];
1413                if (hdev->hw_tc_map & BIT(i) &&
1414                    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1415                    priv->enable)
1416                        cnt++;
1417        }
1418
1419        return cnt;
1420}
1421
1422static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev)
1423{
1424        struct hclge_priv_buf *priv;
1425        u32 rx_priv = 0;
1426        int i;
1427
1428        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1429                priv = &hdev->priv_buf[i];
1430                if (priv->enable)
1431                        rx_priv += priv->buf_size;
1432        }
1433        return rx_priv;
1434}
1435
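    /* Check whether rx_all can hold all enabled private buffers plus a
     * minimum shared buffer; on success carve the remainder into the
     * shared buffer and set its per-TC thresholds.
     */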
1436static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
1437{
1438        u32 shared_buf_min, shared_buf_tc, shared_std;
1439        int tc_num, pfc_enable_num;
1440        u32 shared_buf;
1441        u32 rx_priv;
1442        int i;
1443
1444        tc_num = hclge_get_tc_num(hdev);
1445        pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1446
1447        if (hnae3_dev_dcb_supported(hdev))
1448                shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1449        else
1450                shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1451
1452        shared_buf_tc = pfc_enable_num * hdev->mps +
1453                        (tc_num - pfc_enable_num) * hdev->mps / 2 +
1454                        hdev->mps;
1455        shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
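            /* Illustrative numbers: with mps = 1500, tc_num = 4 and
             * pfc_enable_num = 2, shared_buf_tc = 2 * 1500 +
             * 2 * 1500 / 2 + 1500 = 6000 bytes.
             */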
1456
1457        rx_priv = hclge_get_rx_priv_buff_alloced(hdev);
1458        if (rx_all <= rx_priv + shared_std)
1459                return false;
1460
1461        shared_buf = rx_all - rx_priv;
1462        hdev->s_buf.buf_size = shared_buf;
1463        hdev->s_buf.self.high = shared_buf;
1464        hdev->s_buf.self.low = 2 * hdev->mps;
1465
1466        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1467                if ((hdev->hw_tc_map & BIT(i)) &&
1468                    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1469                        hdev->s_buf.tc_thrd[i].low = hdev->mps;
1470                        hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1471                } else {
1472                        hdev->s_buf.tc_thrd[i].low = 0;
1473                        hdev->s_buf.tc_thrd[i].high = hdev->mps;
1474                }
1475        }
1476
1477        return true;
1478}
1479
1480/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1481 * @hdev: pointer to struct hclge_dev
1482 * @tx_size: the allocated tx buffer for all TCs
1483 * @return: 0: calculation successful, negative: fail
1484 */
1485int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
1486{
1487        u32 rx_all = hdev->pkt_buf_size - tx_size;
1488        int no_pfc_priv_num, pfc_priv_num;
1489        struct hclge_priv_buf *priv;
1490        int i;
1491
1492        /* When DCB is not supported, rx private
1493         * buffer is not allocated.
1494         */
1495        if (!hnae3_dev_dcb_supported(hdev)) {
1496                if (!hclge_is_rx_buf_ok(hdev, rx_all))
1497                        return -ENOMEM;
1498
1499                return 0;
1500        }
1501
1502        /* step 1, try to alloc private buffer for all enabled TCs */
1503        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1504                priv = &hdev->priv_buf[i];
1505                if (hdev->hw_tc_map & BIT(i)) {
1506                        priv->enable = 1;
1507                        if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1508                                priv->wl.low = hdev->mps;
1509                                priv->wl.high = priv->wl.low + hdev->mps;
1510                                priv->buf_size = priv->wl.high +
1511                                                HCLGE_DEFAULT_DV;
1512                        } else {
1513                                priv->wl.low = 0;
1514                                priv->wl.high = 2 * hdev->mps;
1515                                priv->buf_size = priv->wl.high;
1516                        }
1517                } else {
1518                        priv->enable = 0;
1519                        priv->wl.low = 0;
1520                        priv->wl.high = 0;
1521                        priv->buf_size = 0;
1522                }
1523        }
1524
1525        if (hclge_is_rx_buf_ok(hdev, rx_all))
1526                return 0;
1527
1528        /* step 2, try to decrease the private buffer size of
1529         * the PFC-disabled TCs
1530         */
1531        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1532                priv = &hdev->priv_buf[i];
1533
1534                priv->enable = 0;
1535                priv->wl.low = 0;
1536                priv->wl.high = 0;
1537                priv->buf_size = 0;
1538
1539                if (!(hdev->hw_tc_map & BIT(i)))
1540                        continue;
1541
1542                priv->enable = 1;
1543
1544                if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1545                        priv->wl.low = 128;
1546                        priv->wl.high = priv->wl.low + hdev->mps;
1547                        priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1548                } else {
1549                        priv->wl.low = 0;
1550                        priv->wl.high = hdev->mps;
1551                        priv->buf_size = priv->wl.high;
1552                }
1553        }
1554
1555        if (hclge_is_rx_buf_ok(hdev, rx_all))
1556                return 0;
1557
1558        /* step 3, try to reduce the number of PFC-disabled TCs
1559         * that have a private buffer
1560         */
1561        /* get the number of PFC-disabled TCs that still have a private buffer */
1562        no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev);
1563
1564        /* clear the highest-numbered TCs first */
1565        for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1566                priv = &hdev->priv_buf[i];
1567
1568                if (hdev->hw_tc_map & BIT(i) &&
1569                    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1570                        /* Clear this PFC-disabled TC's private buffer */
1571                        priv->wl.low = 0;
1572                        priv->wl.high = 0;
1573                        priv->buf_size = 0;
1574                        priv->enable = 0;
1575                        no_pfc_priv_num--;
1576                }
1577
1578                if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1579                    no_pfc_priv_num == 0)
1580                        break;
1581        }
1582
1583        if (hclge_is_rx_buf_ok(hdev, rx_all))
1584                return 0;
1585
1586        /* step 4, try to reduce the number of PFC-enabled TCs
1587         * that have a private buffer.
1588         */
1589        pfc_priv_num = hclge_get_pfc_priv_num(hdev);
1590
1591        /* clear the highest-numbered TCs first */
1592        for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1593                priv = &hdev->priv_buf[i];
1594
1595                if (hdev->hw_tc_map & BIT(i) &&
1596                    hdev->tm_info.hw_pfc_map & BIT(i)) {
1597                        /* Clear this PFC-enabled TC's private buffer */
1598                        priv->wl.low = 0;
1599                        priv->enable = 0;
1600                        priv->wl.high = 0;
1601                        priv->buf_size = 0;
1602                        pfc_priv_num--;
1603                }
1604
1605                if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1606                    pfc_priv_num == 0)
1607                        break;
1608        }
1609        if (hclge_is_rx_buf_ok(hdev, rx_all))
1610                return 0;
1611
1612        return -ENOMEM;
1613}
1614
1615static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
1616{
1617        struct hclge_rx_priv_buff *req;
1618        struct hclge_desc desc;
1619        int ret;
1620        int i;
1621
1622        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1623        req = (struct hclge_rx_priv_buff *)desc.data;
1624
1625        /* Alloc private buffer TCs */
1626        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1627                struct hclge_priv_buf *priv = &hdev->priv_buf[i];
1628
1629                req->buf_num[i] =
1630                        cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1631                req->buf_num[i] |=
1632                        cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
1633        }
1634
1635        req->shared_buf =
1636                cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1637                            (1 << HCLGE_TC0_PRI_BUF_EN_B));
1638
1639        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1640        if (ret) {
1641                dev_err(&hdev->pdev->dev,
1642                        "rx private buffer alloc cmd failed %d\n", ret);
1643                return ret;
1644        }
1645
1646        return 0;
1647}
1648
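    /* A waterline or threshold of zero means the field is unused, so only
     * non-zero values get the per-field enable bit set.
     */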
1649#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1650
1651static int hclge_rx_priv_wl_config(struct hclge_dev *hdev)
1652{
1653        struct hclge_rx_priv_wl_buf *req;
1654        struct hclge_priv_buf *priv;
1655        struct hclge_desc desc[2];
1656        int i, j;
1657        int ret;
1658
1659        for (i = 0; i < 2; i++) {
1660                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1661                                           false);
1662                req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1663
1664                /* The first descriptor sets the NEXT bit to 1 */
1665                if (i == 0)
1666                        desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1667                else
1668                        desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1669
1670                for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1671                        priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j];
1672                        req->tc_wl[j].high =
1673                                cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1674                        req->tc_wl[j].high |=
1675                                cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1676                                            HCLGE_RX_PRIV_EN_B);
1677                        req->tc_wl[j].low =
1678                                cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1679                        req->tc_wl[j].low |=
1680                                cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1681                                            HCLGE_RX_PRIV_EN_B);
1682                }
1683        }
1684
1685        /* Send 2 descriptors at one time */
1686        ret = hclge_cmd_send(&hdev->hw, desc, 2);
1687        if (ret) {
1688                dev_err(&hdev->pdev->dev,
1689                        "rx private waterline config cmd failed %d\n",
1690                        ret);
1691                return ret;
1692        }
1693        return 0;
1694}
1695
1696static int hclge_common_thrd_config(struct hclge_dev *hdev)
1697{
1698        struct hclge_shared_buf *s_buf = &hdev->s_buf;
1699        struct hclge_rx_com_thrd *req;
1700        struct hclge_desc desc[2];
1701        struct hclge_tc_thrd *tc;
1702        int i, j;
1703        int ret;
1704
1705        for (i = 0; i < 2; i++) {
1706                hclge_cmd_setup_basic_desc(&desc[i],
1707                                           HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1708                req = (struct hclge_rx_com_thrd *)&desc[i].data;
1709
1710                /* The first descriptor sets the NEXT bit to 1 */
1711                if (i == 0)
1712                        desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1713                else
1714                        desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1715
1716                for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1717                        tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1718
1719                        req->com_thrd[j].high =
1720                                cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1721                        req->com_thrd[j].high |=
1722                                cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1723                                            HCLGE_RX_PRIV_EN_B);
1724                        req->com_thrd[j].low =
1725                                cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1726                        req->com_thrd[j].low |=
1727                                cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1728                                            HCLGE_RX_PRIV_EN_B);
1729                }
1730        }
1731
1732        /* Send 2 descriptors at one time */
1733        ret = hclge_cmd_send(&hdev->hw, desc, 2);
1734        if (ret) {
1735                dev_err(&hdev->pdev->dev,
1736                        "common threshold config cmd failed %d\n", ret);
1737                return ret;
1738        }
1739        return 0;
1740}
1741
1742static int hclge_common_wl_config(struct hclge_dev *hdev)
1743{
1744        struct hclge_shared_buf *buf = &hdev->s_buf;
1745        struct hclge_rx_com_wl *req;
1746        struct hclge_desc desc;
1747        int ret;
1748
1749        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1750
1751        req = (struct hclge_rx_com_wl *)desc.data;
1752        req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1753        req->com_wl.high |=
1754                cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1755                            HCLGE_RX_PRIV_EN_B);
1756
1757        req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1758        req->com_wl.low |=
1759                cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1760                            HCLGE_RX_PRIV_EN_B);
1761
1762        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1763        if (ret) {
1764                dev_err(&hdev->pdev->dev,
1765                        "common waterline config cmd failed %d\n", ret);
1766                return ret;
1767        }
1768
1769        return 0;
1770}
1771
1772int hclge_buffer_alloc(struct hclge_dev *hdev)
1773{
1774        u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1775        int ret;
1776
1777        hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM,
1778                                            sizeof(struct hclge_priv_buf),
1779                                            GFP_KERNEL | __GFP_ZERO);
1780        if (!hdev->priv_buf)
1781                return -ENOMEM;
1782
1783        ret = hclge_tx_buffer_alloc(hdev, tx_buf_size);
1784        if (ret) {
1785                dev_err(&hdev->pdev->dev,
1786                        "could not alloc tx buffers %d\n", ret);
1787                return ret;
1788        }
1789
1790        ret = hclge_rx_buffer_calc(hdev, tx_buf_size);
1791        if (ret) {
1792                dev_err(&hdev->pdev->dev,
1793                        "could not calc rx priv buffer size for all TCs %d\n",
1794                        ret);
1795                return ret;
1796        }
1797
1798        ret = hclge_rx_priv_buf_alloc(hdev);
1799        if (ret) {
1800                dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1801                        ret);
1802                return ret;
1803        }
1804
1805        if (hnae3_dev_dcb_supported(hdev)) {
1806                ret = hclge_rx_priv_wl_config(hdev);
1807                if (ret) {
1808                        dev_err(&hdev->pdev->dev,
1809                                "could not configure rx private waterline %d\n",
1810                                ret);
1811                        return ret;
1812                }
1813
1814                ret = hclge_common_thrd_config(hdev);
1815                if (ret) {
1816                        dev_err(&hdev->pdev->dev,
1817                                "could not configure common threshold %d\n",
1818                                ret);
1819                        return ret;
1820                }
1821        }
1822
1823        ret = hclge_common_wl_config(hdev);
1824        if (ret) {
1825                dev_err(&hdev->pdev->dev,
1826                        "could not configure common waterline %d\n", ret);
1827                return ret;
1828        }
1829
1830        return 0;
1831}
1832
1833static int hclge_init_roce_base_info(struct hclge_vport *vport)
1834{
1835        struct hnae3_handle *roce = &vport->roce;
1836        struct hnae3_handle *nic = &vport->nic;
1837
1838        roce->rinfo.num_vectors = vport->back->num_roce_msix;
1839
1840        if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1841            vport->back->num_msi_left == 0)
1842                return -EINVAL;
1843
1844        roce->rinfo.base_vector = vport->back->roce_base_vector;
1845
1846        roce->rinfo.netdev = nic->kinfo.netdev;
1847        roce->rinfo.roce_io_base = vport->back->hw.io_base;
1848
1849        roce->pdev = nic->pdev;
1850        roce->ae_algo = nic->ae_algo;
1851        roce->numa_node_mask = nic->numa_node_mask;
1852
1853        return 0;
1854}
1855
1856static int hclge_init_msix(struct hclge_dev *hdev)
1857{
1858        struct pci_dev *pdev = hdev->pdev;
1859        int ret, i;
1860
1861        hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
1862                                          sizeof(struct msix_entry),
1863                                          GFP_KERNEL);
1864        if (!hdev->msix_entries)
1865                return -ENOMEM;
1866
1867        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1868                                           sizeof(u16), GFP_KERNEL);
1869        if (!hdev->vector_status)
1870                return -ENOMEM;
1871
1872        for (i = 0; i < hdev->num_msi; i++) {
1873                hdev->msix_entries[i].entry = i;
1874                hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1875        }
1876
1877        hdev->num_msi_left = hdev->num_msi;
1878        hdev->base_msi_vector = hdev->pdev->irq;
1879        hdev->roce_base_vector = hdev->base_msi_vector +
1880                                HCLGE_ROCE_VECTOR_OFFSET;
1881
1882        ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
1883                                    hdev->num_msi, hdev->num_msi);
1884        if (ret < 0) {
1885                dev_err(&hdev->pdev->dev,
1886                        "MSI-X vector alloc failed: %d\n", ret);
1887                return ret;
1888        }
1889
1890        return 0;
1891}
1892
1893static int hclge_init_msi(struct hclge_dev *hdev)
1894{
1895        struct pci_dev *pdev = hdev->pdev;
1896        int vectors;
1897        int i;
1898
1899        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1900                                           sizeof(u16), GFP_KERNEL);
1901        if (!hdev->vector_status)
1902                return -ENOMEM;
1903
1904        for (i = 0; i < hdev->num_msi; i++)
1905                hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1906
1907        vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
1908        if (vectors < 0) {
1909                dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
1910                return -EINVAL;
1911        }
1912        hdev->num_msi = vectors;
1913        hdev->num_msi_left = vectors;
1914        hdev->base_msi_vector = pdev->irq;
1915        hdev->roce_base_vector = hdev->base_msi_vector +
1916                                HCLGE_ROCE_VECTOR_OFFSET;
1917
1918        return 0;
1919}
1920
1921static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
1922{
1923        struct hclge_mac *mac = &hdev->hw.mac;
1924
1925        if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
1926                mac->duplex = (u8)duplex;
1927        else
1928                mac->duplex = HCLGE_MAC_FULL;
1929
1930        mac->speed = speed;
1931}
1932
1933int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1934{
1935        struct hclge_config_mac_speed_dup *req;
1936        struct hclge_desc desc;
1937        int ret;
1938
1939        req = (struct hclge_config_mac_speed_dup *)desc.data;
1940
1941        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1942
1943        hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1944
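            /* Firmware speed encoding used below: 0 = 1G, 1 = 10G, 2 = 25G,
             * 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
             */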
1945        switch (speed) {
1946        case HCLGE_MAC_SPEED_10M:
1947                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1948                               HCLGE_CFG_SPEED_S, 6);
1949                break;
1950        case HCLGE_MAC_SPEED_100M:
1951                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1952                               HCLGE_CFG_SPEED_S, 7);
1953                break;
1954        case HCLGE_MAC_SPEED_1G:
1955                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1956                               HCLGE_CFG_SPEED_S, 0);
1957                break;
1958        case HCLGE_MAC_SPEED_10G:
1959                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1960                               HCLGE_CFG_SPEED_S, 1);
1961                break;
1962        case HCLGE_MAC_SPEED_25G:
1963                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1964                               HCLGE_CFG_SPEED_S, 2);
1965                break;
1966        case HCLGE_MAC_SPEED_40G:
1967                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1968                               HCLGE_CFG_SPEED_S, 3);
1969                break;
1970        case HCLGE_MAC_SPEED_50G:
1971                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1972                               HCLGE_CFG_SPEED_S, 4);
1973                break;
1974        case HCLGE_MAC_SPEED_100G:
1975                hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1976                               HCLGE_CFG_SPEED_S, 5);
1977                break;
1978        default:
1979                dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1980                return -EINVAL;
1981        }
1982
1983        hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1984                     1);
1985
1986        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1987        if (ret) {
1988                dev_err(&hdev->pdev->dev,
1989                        "mac speed/duplex config cmd failed %d.\n", ret);
1990                return ret;
1991        }
1992
1993        hclge_check_speed_dup(hdev, duplex, speed);
1994
1995        return 0;
1996}
1997
1998static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1999                                     u8 duplex)
2000{
2001        struct hclge_vport *vport = hclge_get_vport(handle);
2002        struct hclge_dev *hdev = vport->back;
2003
2004        return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2005}
2006
2007static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2008                                        u8 *duplex)
2009{
2010        struct hclge_query_an_speed_dup *req;
2011        struct hclge_desc desc;
2012        int speed_tmp;
2013        int ret;
2014
2015        req = (struct hclge_query_an_speed_dup *)desc.data;
2016
2017        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2018        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2019        if (ret) {
2020                dev_err(&hdev->pdev->dev,
2021                        "mac speed/autoneg/duplex query cmd failed %d\n",
2022                        ret);
2023                return ret;
2024        }
2025
2026        *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2027        speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2028                                   HCLGE_QUERY_SPEED_S);
2029
2030        ret = hclge_parse_speed(speed_tmp, speed);
2031        if (ret) {
2032                dev_err(&hdev->pdev->dev,
2033                        "could not parse speed(=%d), %d\n", speed_tmp, ret);
2034                return -EIO;
2035        }
2036
2037        return 0;
2038}
2039
2040static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2041{
2042        struct hclge_mac *mac = &hdev->hw.mac;
2043        struct hclge_query_an_speed_dup *req;
2044        struct hclge_desc desc;
2045        int ret;
2046
2047        req = (struct hclge_query_an_speed_dup *)desc.data;
2048
2049        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2050        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2051        if (ret) {
2052                dev_err(&hdev->pdev->dev,
2053                        "autoneg result query cmd failed %d.\n", ret);
2054                return ret;
2055        }
2056
2057        mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2058
2059        return 0;
2060}
2061
2062static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2063{
2064        struct hclge_config_auto_neg *req;
2065        struct hclge_desc desc;
2066        int ret;
2067
2068        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2069
2070        req = (struct hclge_config_auto_neg *)desc.data;
2071        hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2072
2073        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2074        if (ret) {
2075                dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2076                        ret);
2077                return ret;
2078        }
2079
2080        return 0;
2081}
2082
2083static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2084{
2085        struct hclge_vport *vport = hclge_get_vport(handle);
2086        struct hclge_dev *hdev = vport->back;
2087
2088        return hclge_set_autoneg_en(hdev, enable);
2089}
2090
2091static int hclge_get_autoneg(struct hnae3_handle *handle)
2092{
2093        struct hclge_vport *vport = hclge_get_vport(handle);
2094        struct hclge_dev *hdev = vport->back;
2095
2096        hclge_query_autoneg_result(hdev);
2097
2098        return hdev->hw.mac.autoneg;
2099}
2100
2101static int hclge_mac_init(struct hclge_dev *hdev)
2102{
2103        struct hclge_mac *mac = &hdev->hw.mac;
2104        int ret;
2105
2106        ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2107        if (ret) {
2108                dev_err(&hdev->pdev->dev,
2109                        "Config mac speed dup fail ret=%d\n", ret);
2110                return ret;
2111        }
2112
2113        mac->link = 0;
2114
2115        ret = hclge_mac_mdio_config(hdev);
2116        if (ret) {
2117                dev_warn(&hdev->pdev->dev,
2118                         "mdio config fail ret=%d\n", ret);
2119                return ret;
2120        }
2121
2122        /* Initialize the MTA table work mode */
2123        hdev->accept_mta_mc     = true;
2124        hdev->enable_mta        = true;
2125        hdev->mta_mac_sel_type  = HCLGE_MAC_ADDR_47_36;
2126
2127        ret = hclge_set_mta_filter_mode(hdev,
2128                                        hdev->mta_mac_sel_type,
2129                                        hdev->enable_mta);
2130        if (ret) {
2131                dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2132                        ret);
2133                return ret;
2134        }
2135
2136        return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2137}
2138
2139static void hclge_task_schedule(struct hclge_dev *hdev)
2140{
2141        if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2142            !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2143            !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2144                (void)schedule_work(&hdev->service_task);
2145}
2146
2147static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2148{
2149        struct hclge_link_status *req;
2150        struct hclge_desc desc;
2151        int link_status;
2152        int ret;
2153
2154        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2155        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2156        if (ret) {
2157                dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2158                        ret);
2159                return ret;
2160        }
2161
2162        req = (struct hclge_link_status *)desc.data;
2163        link_status = req->status & HCLGE_LINK_STATUS;
2164
2165        return !!link_status;
2166}
2167
2168static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2169{
2170        int mac_state;
2171        int link_stat;
2172
2173        mac_state = hclge_get_mac_link_status(hdev);
2174
2175        if (hdev->hw.mac.phydev) {
2176                if (!genphy_read_status(hdev->hw.mac.phydev))
2177                        link_stat = mac_state &
2178                                hdev->hw.mac.phydev->link;
2179                else
2180                        link_stat = 0;
2181
2182        } else {
2183                link_stat = mac_state;
2184        }
2185
2186        return !!link_stat;
2187}
2188
2189static void hclge_update_link_status(struct hclge_dev *hdev)
2190{
2191        struct hnae3_client *client = hdev->nic_client;
2192        struct hnae3_handle *handle;
2193        int state;
2194        int i;
2195
2196        if (!client)
2197                return;
2198        state = hclge_get_mac_phy_link(hdev);
2199        if (state != hdev->hw.mac.link) {
2200                for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2201                        handle = &hdev->vport[i].nic;
2202                        client->ops->link_status_change(handle, state);
2203                }
2204                hdev->hw.mac.link = state;
2205        }
2206}
2207
2208static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2209{
2210        struct hclge_mac mac = hdev->hw.mac;
2211        u8 duplex;
2212        int speed;
2213        int ret;
2214
2215        /* get the speed and duplex as the autoneg result from the mac cmd
2216         * when no phy exists.
2217         */
2218        if (mac.phydev)
2219                return 0;
2220
2221        /* update mac->autoneg. */
2222        ret = hclge_query_autoneg_result(hdev);
2223        if (ret) {
2224                dev_err(&hdev->pdev->dev,
2225                        "autoneg result query failed %d\n", ret);
2226                return ret;
2227        }
2228
2229        if (!mac.autoneg)
2230                return 0;
2231
2232        ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2233        if (ret) {
2234                dev_err(&hdev->pdev->dev,
2235                        "mac autoneg/speed/duplex query failed %d\n", ret);
2236                return ret;
2237        }
2238
2239        if ((mac.speed != speed) || (mac.duplex != duplex)) {
2240                ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2241                if (ret) {
2242                        dev_err(&hdev->pdev->dev,
2243                                "mac speed/duplex config failed %d\n", ret);
2244                        return ret;
2245                }
2246        }
2247
2248        return 0;
2249}
2250
2251static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2252{
2253        struct hclge_vport *vport = hclge_get_vport(handle);
2254        struct hclge_dev *hdev = vport->back;
2255
2256        return hclge_update_speed_duplex(hdev);
2257}
2258
2259static int hclge_get_status(struct hnae3_handle *handle)
2260{
2261        struct hclge_vport *vport = hclge_get_vport(handle);
2262        struct hclge_dev *hdev = vport->back;
2263
2264        hclge_update_link_status(hdev);
2265
2266        return hdev->hw.mac.link;
2267}
2268
2269static void hclge_service_timer(unsigned long data)
2270{
2271        struct hclge_dev *hdev = (struct hclge_dev *)data;

2272        (void)mod_timer(&hdev->service_timer, jiffies + HZ);
2273
2274        hclge_task_schedule(hdev);
2275}
2276
2277static void hclge_service_complete(struct hclge_dev *hdev)
2278{
2279        WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2280
2281        /* Flush memory before next watchdog */
2282        smp_mb__before_atomic();
2283        clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2284}
2285
2286static void hclge_service_task(struct work_struct *work)
2287{
2288        struct hclge_dev *hdev =
2289                container_of(work, struct hclge_dev, service_task);
2290
2291        hclge_update_speed_duplex(hdev);
2292        hclge_update_link_status(hdev);
2293        hclge_update_stats_for_all(hdev);
2294        hclge_service_complete(hdev);
2295}
2296
2297static void hclge_disable_sriov(struct hclge_dev *hdev)
2298{
2299        /* If our VFs are assigned we cannot shut down SR-IOV
2300         * without causing issues, so just leave the hardware
2301         * available but disabled
2302         */
2303        if (pci_vfs_assigned(hdev->pdev)) {
2304                dev_warn(&hdev->pdev->dev,
2305                         "disabling driver while VFs are assigned\n");
2306                return;
2307        }
2308
2309        pci_disable_sriov(hdev->pdev);
2310}
2311
2312struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2313{
2314        /* VF handle has no client */
2315        if (!handle->client)
2316                return container_of(handle, struct hclge_vport, nic);
2317        else if (handle->client->type == HNAE3_CLIENT_ROCE)
2318                return container_of(handle, struct hclge_vport, roce);
2319        else
2320                return container_of(handle, struct hclge_vport, nic);
2321}
2322
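    /* Allocate up to vector_num vectors for the vport, filling vector_info
     * with the IRQ number and the per-vector register address. The search
     * starts at i = 1, so vector 0 is never handed out (apparently kept
     * for non-ring interrupts).
     */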
2323static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2324                            struct hnae3_vector_info *vector_info)
2325{
2326        struct hclge_vport *vport = hclge_get_vport(handle);
2327        struct hnae3_vector_info *vector = vector_info;
2328        struct hclge_dev *hdev = vport->back;
2329        int alloc = 0;
2330        int i, j;
2331
2332        vector_num = min(hdev->num_msi_left, vector_num);
2333
2334        for (j = 0; j < vector_num; j++) {
2335                for (i = 1; i < hdev->num_msi; i++) {
2336                        if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2337                                vector->vector = pci_irq_vector(hdev->pdev, i);
2338                                vector->io_addr = hdev->hw.io_base +
2339                                        HCLGE_VECTOR_REG_BASE +
2340                                        (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2341                                        vport->vport_id *
2342                                        HCLGE_VECTOR_VF_OFFSET;
2343                                hdev->vector_status[i] = vport->vport_id;
2344
2345                                vector++;
2346                                alloc++;
2347
2348                                break;
2349                        }
2350                }
2351        }
2352        hdev->num_msi_left -= alloc;
2353        hdev->num_msi_used += alloc;
2354
2355        return alloc;
2356}
2357
2358static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2359{
2360        int i;
2361
2362        for (i = 0; i < hdev->num_msi; i++) {
2363                if (hdev->msix_entries) {
2364                        if (vector == hdev->msix_entries[i].vector)
2365                                return i;
2366                } else {
2367                        if (vector == (hdev->base_msi_vector + i))
2368                                return i;
2369                }
2370        }
2371        return -EINVAL;
2372}
2373
2374static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2375{
2376        return HCLGE_RSS_KEY_SIZE;
2377}
2378
2379static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2380{
2381        return HCLGE_RSS_IND_TBL_SIZE;
2382}
2383
2384static int hclge_get_rss_algo(struct hclge_dev *hdev)
2385{
2386        struct hclge_rss_config *req;
2387        struct hclge_desc desc;
2388        int rss_hash_algo;
2389        int ret;
2390
2391        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2392
2393        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2394        if (ret) {
2395                dev_err(&hdev->pdev->dev,
2396                        "Get rss algo config fail, status = %d\n", ret);
2397                return ret;
2398        }
2399
2400        req = (struct hclge_rss_config *)desc.data;
2401        rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2402
2403        if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2404                return ETH_RSS_HASH_TOP;
2405
2406        return -EINVAL;
2407}
2408
2409static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2410                                  const u8 hfunc, const u8 *key)
2411{
2412        struct hclge_rss_config *req;
2413        struct hclge_desc desc;
2414        int key_offset;
2415        int key_size;
2416        int ret;
2417
2418        req = (struct hclge_rss_config *)desc.data;
2419
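            /* The hash key is spread over three descriptors:
             * HCLGE_RSS_HASH_KEY_NUM bytes in each of the first two and
             * the remainder in the last, e.g. 16 + 16 + 8 assuming a
             * 40-byte key and 16-byte chunks.
             */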
2420        for (key_offset = 0; key_offset < 3; key_offset++) {
2421                hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2422                                           false);
2423
2424                req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2425                req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2426
2427                if (key_offset == 2)
2428                        key_size =
2429                        HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2430                else
2431                        key_size = HCLGE_RSS_HASH_KEY_NUM;
2432
2433                memcpy(req->hash_key,
2434                       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2435
2436                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2437                if (ret) {
2438                        dev_err(&hdev->pdev->dev,
2439                                "Configure RSS config fail, status = %d\n",
2440                                ret);
2441                        return ret;
2442                }
2443        }
2444        return 0;
2445}
2446
2447static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2448{
2449        struct hclge_rss_indirection_table *req;
2450        struct hclge_desc desc;
2451        int i, j;
2452        int ret;
2453
2454        req = (struct hclge_rss_indirection_table *)desc.data;
2455
2456        for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2457                hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
2458                                           false);
2459
2460                req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE;
2461                req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK;
2462
2463                for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2464                        req->rss_result[j] =
2465                                indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2466
2467                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2468                if (ret) {
2469                        dev_err(&hdev->pdev->dev,
2470                                "Configure rss indir table fail, status = %d\n",
2471                                ret);
2472                        return ret;
2473                }
2474        }
2475        return 0;
2476}
2477
2478static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2479                                 u16 *tc_size, u16 *tc_offset)
2480{
2481        struct hclge_rss_tc_mode *req;
2482        struct hclge_desc desc;
2483        int ret;
2484        int i;
2485
2486        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2487        req = (struct hclge_rss_tc_mode *)desc.data;
2488
2489        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2490                hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B,
2491                             (tc_valid[i] & 0x1));
2492                hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M,
2493                               HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2494                hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M,
2495                               HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2496        }
2497
2498        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2499        if (ret) {
2500                dev_err(&hdev->pdev->dev,
2501                        "Configure rss tc mode fail, status = %d\n", ret);
2502                return ret;
2503        }
2504
2505        return 0;
2506}
2507
2508static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2509{
2510#define HCLGE_RSS_INPUT_TUPLE_OTHER             0xf
2511#define HCLGE_RSS_INPUT_TUPLE_SCTP              0x1f
2512        struct hclge_rss_input_tuple *req;
2513        struct hclge_desc desc;
2514        int ret;
2515
2516        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2517
2518        req = (struct hclge_rss_input_tuple *)desc.data;
2519        req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2520        req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2521        req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2522        req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2523        req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2524        req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2525        req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2526        req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2527        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2528        if (ret) {
2529                dev_err(&hdev->pdev->dev,
2530                        "Configure rss input fail, status = %d\n", ret);
2531                return ret;
2532        }
2533
2534        return 0;
2535}
2536
2537static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2538                         u8 *key, u8 *hfunc)
2539{
2540        struct hclge_vport *vport = hclge_get_vport(handle);
2541        struct hclge_dev *hdev = vport->back;
2542        int i;
2543
2544        /* Get hash algorithm */
2545        if (hfunc)
2546                *hfunc = hclge_get_rss_algo(hdev);
2547
2548        /* Get the RSS Key required by the user */
2549        if (key)
2550                memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2551
2552        /* Get indirect table */
2553        if (indir)
2554                for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2555                        indir[i] = vport->rss_indirection_tbl[i];
2556
2557        return 0;
2558}
2559
2560static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2561                         const u8 *key, const u8 hfunc)
2562{
2563        struct hclge_vport *vport = hclge_get_vport(handle);
2564        struct hclge_dev *hdev = vport->back;
2565        u8 hash_algo;
2566        int ret, i;
2567
2568        /* Set the RSS Hash Key if specified by the user */
2569        if (key) {
2570                /* Update the shadow RSS key with the user specified key */
2571                memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2572
2573                if (hfunc == ETH_RSS_HASH_TOP ||
2574                    hfunc == ETH_RSS_HASH_NO_CHANGE)
2575                        hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2576                else
2577                        return -EINVAL;
2578                ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2579                if (ret)
2580                        return ret;
2581        }
2582
2583        /* Update the shadow RSS table with user specified qids */
2584        for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2585                vport->rss_indirection_tbl[i] = indir[i];
2586
2587        /* Update the hardware */
2588        ret = hclge_set_rss_indir_table(hdev, indir);
2589        return ret;
2590}
2591
2592static int hclge_get_tc_size(struct hnae3_handle *handle)
2593{
2594        struct hclge_vport *vport = hclge_get_vport(handle);
2595        struct hclge_dev *hdev = vport->back;
2596
2597        return hdev->rss_size_max;
2598}
2599
2600static int hclge_rss_init_hw(struct hclge_dev *hdev)
2601{
2602        const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2603        struct hclge_vport *vport = hdev->vport;
2604        u16 tc_offset[HCLGE_MAX_TC_NUM];
2605        u8 rss_key[HCLGE_RSS_KEY_SIZE];
2606        u16 tc_valid[HCLGE_MAX_TC_NUM];
2607        u16 tc_size[HCLGE_MAX_TC_NUM];
2608        u32 *rss_indir = NULL;
2609        u16 rss_size = 0, roundup_size;
2610        const u8 *key;
2611        int i, ret, j;
2612
2613        rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
2614        if (!rss_indir)
2615                return -ENOMEM;
2616
2617        /* Get default RSS key */
2618        netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
2619
2620        /* Initialize RSS indirect table for each vport */
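            /* Entry n maps to queue n % alloc_rss_size; e.g. (illustrative
             * sizes) a 256-entry table over 16 queues repeats queues 0..15.
             */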
2621        for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
2622                for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
2623                        vport[j].rss_indirection_tbl[i] =
2624                                i % vport[j].alloc_rss_size;
2625
2626                        /* vport 0 is for PF */
2627                        if (j != 0)
2628                                continue;
2629
2630                        rss_size = vport[j].alloc_rss_size;
2631                        rss_indir[i] = vport[j].rss_indirection_tbl[i];
2632                }
2633        }
2634        ret = hclge_set_rss_indir_table(hdev, rss_indir);
2635        if (ret)
2636                goto err;
2637
2638        key = rss_key;
2639        ret = hclge_set_rss_algo_key(hdev, hfunc, key);
2640        if (ret)
2641                goto err;
2642
2643        ret = hclge_set_rss_input_tuple(hdev);
2644        if (ret)
2645                goto err;
2646
2647        /* Each TC has the same queue size, and the tc_size set to hardware
2648         * is the log2 of the rss_size rounded up to a power of two; the
2649         * actual queue size is limited by the indirection table.
2650         */
2651        if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
2652                dev_err(&hdev->pdev->dev,
2653                        "Configure rss tc size failed, invalid TC_SIZE = %d\n",
2654                        rss_size);
2655                ret = -EINVAL;
2656                goto err;
2657        }
2658
2659        roundup_size = roundup_pow_of_two(rss_size);
2660        roundup_size = ilog2(roundup_size);
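            /* Illustrative value: rss_size = 10 rounds up to 16 and is
             * written to hardware as ilog2(16) = 4.
             */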
2661
2662        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2663                tc_valid[i] = 0;
2664
2665                if (!(hdev->hw_tc_map & BIT(i)))
2666                        continue;
2667
2668                tc_valid[i] = 1;
2669                tc_size[i] = roundup_size;
2670                tc_offset[i] = rss_size * i;
2671        }
2672
2673        ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
2674
2675err:
2676        kfree(rss_indir);
2677
2678        return ret;
2679}
2680
2681int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
2682                                   struct hnae3_ring_chain_node *ring_chain)
2683{
2684        struct hclge_dev *hdev = vport->back;
2685        struct hclge_ctrl_vector_chain *req;
2686        struct hnae3_ring_chain_node *node;
2687        struct hclge_desc desc;
2688        int ret;
2689        int i;
2690
2691        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
2692
2693        req = (struct hclge_ctrl_vector_chain *)desc.data;
2694        req->int_vector_id = vector_id;
2695
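            /* Rings are packed HCLGE_VECTOR_ELEMENTS_PER_CMD entries per
             * descriptor; a full descriptor is sent immediately and any
             * shorter tail is flushed after the loop.
             */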
2696        i = 0;
2697        for (node = ring_chain; node; node = node->next) {
2698                hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2699                               HCLGE_INT_TYPE_S,
2700                               hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2701                hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2702                               HCLGE_TQP_ID_S, node->tqp_index);
2703                hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
2704                               HCLGE_INT_GL_IDX_S,
2705                               hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2706                req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
2707                req->vfid = vport->vport_id;
2708
2709                if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2710                        req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2711
2712                        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2713                        if (ret) {
2714                                dev_err(&hdev->pdev->dev,
2715                                        "Map TQP fail, status is %d.\n",
2716                                        ret);
2717                                return ret;
2718                        }
2719                        i = 0;
2720
2721                        hclge_cmd_setup_basic_desc(&desc,
2722                                                   HCLGE_OPC_ADD_RING_TO_VECTOR,
2723                                                   false);
2724                        req->int_vector_id = vector_id;
2725                }
2726        }
2727
2728        if (i > 0) {
2729                req->int_cause_num = i;
2730
2731                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2732                if (ret) {
2733                        dev_err(&hdev->pdev->dev,
2734                                "Map TQP fail, status is %d.\n", ret);
2735                        return ret;
2736                }
2737        }
2738
2739        return 0;
2740}
2741
2742int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle,
2743                                    int vector,
2744                                    struct hnae3_ring_chain_node *ring_chain)
2745{
2746        struct hclge_vport *vport = hclge_get_vport(handle);
2747        struct hclge_dev *hdev = vport->back;
2748        int vector_id;
2749
2750        vector_id = hclge_get_vector_index(hdev, vector);
2751        if (vector_id < 0) {
2752                dev_err(&hdev->pdev->dev,
2753                        "Get vector index fail. ret =%d\n", vector_id);
2754                return vector_id;
2755        }
2756
2757        return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
2758}
2759
2760static int hclge_unmap_ring_from_vector(
2761        struct hnae3_handle *handle, int vector,
2762        struct hnae3_ring_chain_node *ring_chain)
2763{
2764        struct hclge_vport *vport = hclge_get_vport(handle);
2765        struct hclge_dev *hdev = vport->back;
2766        struct hclge_ctrl_vector_chain *req;
2767        struct hnae3_ring_chain_node *node;
2768        struct hclge_desc desc;
2769        int i, vector_id;
2770        int ret;
2771
2772        vector_id = hclge_get_vector_index(hdev, vector);
2773        if (vector_id < 0) {
2774                dev_err(&handle->pdev->dev,
2775                        "Get vector index fail. ret =%d\n", vector_id);
2776                return vector_id;
2777        }
2778
2779        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
2780
2781        req = (struct hclge_ctrl_vector_chain *)desc.data;
2782        req->int_vector_id = vector_id;
2783
2784        i = 0;
2785        for (node = ring_chain; node; node = node->next) {
2786                hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2787                               HCLGE_INT_TYPE_S,
2788                               hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2789                hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2790                               HCLGE_TQP_ID_S, node->tqp_index);
2791                hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
2792                               HCLGE_INT_GL_IDX_S,
2793                               hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2794
2795                req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
2796                req->vfid = vport->vport_id;
2797
2798                if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2799                        req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2800
2801                        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2802                        if (ret) {
2803                                dev_err(&hdev->pdev->dev,
2804                                        "Unmap TQP fail, status is %d.\n",
2805                                        ret);
2806                                return ret;
2807                        }
2808                        i = 0;
2809                        hclge_cmd_setup_basic_desc(&desc,
2810                                                   HCLGE_OPC_DEL_RING_TO_VECTOR,
2811                                                   false);
2812                        req->int_vector_id = vector_id;
2813                }
2814        }
2815
2816        if (i > 0) {
2817                req->int_cause_num = i;
2818
2819                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2820                if (ret) {
2821                        dev_err(&hdev->pdev->dev,
2822                                "Unmap TQP fail, status is %d.\n", ret);
2823                        return ret;
2824                }
2825        }
2826
2827        return 0;
2828}
2829
2830int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
2831                               struct hclge_promisc_param *param)
2832{
2833        struct hclge_promisc_cfg *req;
2834        struct hclge_desc desc;
2835        int ret;
2836
2837        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
2838
2839        req = (struct hclge_promisc_cfg *)desc.data;
2840        req->vf_id = param->vf_id;
2841        req->flag = (param->enable << HCLGE_PROMISC_EN_B);
2842
2843        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2844        if (ret) {
2845                dev_err(&hdev->pdev->dev,
2846                        "Set promisc mode fail, status is %d.\n", ret);
2847                return ret;
2848        }
2849        return 0;
2850}
2851
2852void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
2853                              bool en_mc, bool en_bc, int vport_id)
2854{
2855        if (!param)
2856                return;
2857
2858        memset(param, 0, sizeof(struct hclge_promisc_param));
2859        if (en_uc)
2860                param->enable = HCLGE_PROMISC_EN_UC;
2861        if (en_mc)
2862                param->enable |= HCLGE_PROMISC_EN_MC;
2863        if (en_bc)
2864                param->enable |= HCLGE_PROMISC_EN_BC;
2865        param->vf_id = vport_id;
2866}
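/* Usage sketch (illustrative): the two helpers above combine to switch a
 * vport into full promiscuous mode, much as hclge_set_promisc_mode() below
 * does for the unicast/multicast flags:
 *
 *	struct hclge_promisc_param param;
 *
 *	hclge_promisc_param_init(&param, true, true, true, vport->vport_id);
 *	hclge_cmd_set_promisc_mode(hdev, &param);
 */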
2867
2868static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
2869{
2870        struct hclge_vport *vport = hclge_get_vport(handle);
2871        struct hclge_dev *hdev = vport->back;
2872        struct hclge_promisc_param param;
2873
2874        hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
2875        hclge_cmd_set_promisc_mode(hdev, &param);
2876}
2877
2878static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
2879{
2880        struct hclge_desc desc;
2881        struct hclge_config_mac_mode *req =
2882                (struct hclge_config_mac_mode *)desc.data;
2883        int ret;
2884
2885        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
2886        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable);
2887        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable);
2888        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable);
2889        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable);
2890        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0);
2891        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0);
2892        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0);
2893        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0);
2894        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable);
2895        hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable);
2896        hnae_set_bit(req->txrx_pad_fcs_loop_en,
2897                     HCLGE_MAC_RX_FCS_STRIP_B, enable);
2898        hnae_set_bit(req->txrx_pad_fcs_loop_en,
2899                     HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
2900        hnae_set_bit(req->txrx_pad_fcs_loop_en,
2901                     HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
2902        hnae_set_bit(req->txrx_pad_fcs_loop_en,
2903                     HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
2904
2905        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2906        if (ret)
2907                dev_err(&hdev->pdev->dev,
2908                        "mac enable fail, ret =%d.\n", ret);
2909}
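/* hclge_cfg_mac_mode() gates TX/RX, padding, FCS insertion/stripping and
 * oversize/undersize truncation together with one enable flag; the 1588
 * timestamping and application/line loopback bits are always written as 0
 * here, i.e. left disabled regardless of @enable.
 */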
2910
2911static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
2912                            int stream_id, bool enable)
2913{
2914        struct hclge_desc desc;
2915        struct hclge_cfg_com_tqp_queue *req =
2916                (struct hclge_cfg_com_tqp_queue *)desc.data;
2917        int ret;
2918
2919        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
2920        req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
2921        req->stream_id = cpu_to_le16(stream_id);
2922        req->enable |= enable << HCLGE_TQP_ENABLE_B;
2923
2924        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2925        if (ret)
2926                dev_err(&hdev->pdev->dev,
2927                        "Tqp enable fail, status =%d.\n", ret);
2928        return ret;
2929}
2930
2931static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
2932{
2933        struct hclge_vport *vport = hclge_get_vport(handle);
2934        struct hnae3_queue *queue;
2935        struct hclge_tqp *tqp;
2936        int i;
2937
2938        for (i = 0; i < vport->alloc_tqps; i++) {
2939                queue = handle->kinfo.tqp[i];
2940                tqp = container_of(queue, struct hclge_tqp, q);
2941                memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
2942        }
2943}
2944
2945static int hclge_ae_start(struct hnae3_handle *handle)
2946{
2947        struct hclge_vport *vport = hclge_get_vport(handle);
2948        struct hclge_dev *hdev = vport->back;
2949        int i, queue_id, ret;
2950
2951        for (i = 0; i < vport->alloc_tqps; i++) {
2952                /* todo clear interrupt */
2953                /* ring enable */
2954                queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
2955                if (queue_id < 0) {
2956                        dev_warn(&hdev->pdev->dev,
2957                                 "Get invalid queue id, ignore it\n");
2958                        continue;
2959                }
2960
2961                hclge_tqp_enable(hdev, queue_id, 0, true);
2962        }
2963        /* mac enable */
2964        hclge_cfg_mac_mode(hdev, true);
2965        clear_bit(HCLGE_STATE_DOWN, &hdev->state);
2966        (void)mod_timer(&hdev->service_timer, jiffies + HZ);
2967
2968        ret = hclge_mac_start_phy(hdev);
2969        if (ret)
2970                return ret;
2971
2972        /* reset tqp stats */
2973        hclge_reset_tqp_stats(handle);
2974
2975        return 0;
2976}
2977
2978static void hclge_ae_stop(struct hnae3_handle *handle)
2979{
2980        struct hclge_vport *vport = hclge_get_vport(handle);
2981        struct hclge_dev *hdev = vport->back;
2982        int i, queue_id;
2983
2984        for (i = 0; i < vport->alloc_tqps; i++) {
2985                /* Ring disable */
2986                queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
2987                if (queue_id < 0) {
2988                        dev_warn(&hdev->pdev->dev,
2989                                 "Get invalid queue id, ignore it\n");
2990                        continue;
2991                }
2992
2993                hclge_tqp_enable(hdev, queue_id, 0, false);
2994        }
2995        /* Mac disable */
2996        hclge_cfg_mac_mode(hdev, false);
2997
2998        hclge_mac_stop_phy(hdev);
2999
3000        /* reset tqp stats */
3001        hclge_reset_tqp_stats(handle);
3002}
3003
3004static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3005                                         u16 cmdq_resp, u8  resp_code,
3006                                         enum hclge_mac_vlan_tbl_opcode op)
3007{
3008        struct hclge_dev *hdev = vport->back;
3009        int return_status = -EIO;
3010
3011        if (cmdq_resp) {
3012                dev_err(&hdev->pdev->dev,
3013                        "cmdq execute failed for get_mac_vlan_cmd_status, status = %d.\n",
3014                        cmdq_resp);
3015                return -EIO;
3016        }
3017
3018        if (op == HCLGE_MAC_VLAN_ADD) {
3019                if ((!resp_code) || (resp_code == 1)) {
3020                        return_status = 0;
3021                } else if (resp_code == 2) {
3022                        return_status = -EIO;
3023                        dev_err(&hdev->pdev->dev,
3024                                "add mac addr failed for uc_overflow.\n");
3025                } else if (resp_code == 3) {
3026                        return_status = -EIO;
3027                        dev_err(&hdev->pdev->dev,
3028                                "add mac addr failed for mc_overflow.\n");
3029                } else {
3030                        dev_err(&hdev->pdev->dev,
3031                                "add mac addr failed for undefined, code=%d.\n",
3032                                resp_code);
3033                }
3034        } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3035                if (!resp_code) {
3036                        return_status = 0;
3037                } else if (resp_code == 1) {
3038                        return_status = -EIO;
3039                        dev_dbg(&hdev->pdev->dev,
3040                                "remove mac addr failed for miss.\n");
3041                } else {
3042                        dev_err(&hdev->pdev->dev,
3043                                "remove mac addr failed for undefined, code=%d.\n",
3044                                resp_code);
3045                }
3046        } else if (op == HCLGE_MAC_VLAN_LKUP) {
3047                if (!resp_code) {
3048                        return_status = 0;
3049                } else if (resp_code == 1) {
3050                        return_status = -EIO;
3051                        dev_dbg(&hdev->pdev->dev,
3052                                "lookup mac addr failed for miss.\n");
3053                } else {
3054                        dev_err(&hdev->pdev->dev,
3055                                "lookup mac addr failed for undefined, code=%d.\n",
3056                                resp_code);
3057                }
3058        } else {
3059                return_status = -EIO;
3060                dev_err(&hdev->pdev->dev,
3061                        "unknown opcode for get_mac_vlan_cmd_status, opcode = %d.\n",
3062                        op);
3063        }
3064
3065        return return_status;
3066}
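/* Summary of the firmware resp_code handling above:
 *
 *	op	resp_code	meaning				return
 *	ADD	0 or 1		success				0
 *	ADD	2		unicast table overflow		-EIO
 *	ADD	3		multicast table overflow	-EIO
 *	REMOVE	0		success				0
 *	REMOVE	1		entry not found (miss)		-EIO
 *	LKUP	0		success				0
 *	LKUP	1		entry not found (miss)		-EIO
 *
 * Any other combination is logged and reported as -EIO.
 */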
3067
3068static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3069{
3070        int word_num;
3071        int bit_num;
3072
3073        if (vfid > 255 || vfid < 0)
3074                return -EIO;
3075
3076        if (vfid <= 191) {
3077                word_num = vfid / 32;
3078                bit_num  = vfid % 32;
3079                if (clr)
3080                        desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
3081                else
3082                        desc[1].data[word_num] |= cpu_to_le32(1U << bit_num);
3083        } else {
3084                word_num = (vfid - 192) / 32;
3085                bit_num  = vfid % 32;
3086                if (clr)
3087                        desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
3088                else
3089                        desc[2].data[word_num] |= cpu_to_le32(1U << bit_num);
3090        }
3091
3092        return 0;
3093}
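/* Worked example for hclge_update_desc_vfid(): function ids 0..191 map into
 * desc[1] and ids 192..255 into desc[2], 32 ids per 32-bit data word:
 *
 *	vfid =  70 -> desc[1].data[2], bit 6   (70 / 32 = 2, 70 % 32 = 6)
 *	vfid = 200 -> desc[2].data[0], bit 8   ((200 - 192) / 32 = 0,
 *						200 % 32 = 8)
 */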
3094
3095static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3096{
3097#define HCLGE_DESC_NUMBER 3
3098#define HCLGE_FUNC_NUMBER_PER_DESC 6
3099        int i, j;
3100
3101        for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3102                for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3103                        if (desc[i].data[j])
3104                                return false;
3105
3106        return true;
3107}
3108
3109static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
3110                                   const u8 *addr)
3111{
3112        const unsigned char *mac_addr = addr;
3113        u32 high_val = ((u32)mac_addr[3] << 24) | (mac_addr[2] << 16) |
3114                       (mac_addr[1] << 8) | mac_addr[0];
3115        u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
3116
3117        new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3118        new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3119}
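/* Worked example for hclge_prepare_mac_addr() with the (illustrative)
 * address 00:11:22:33:44:55, i.e. addr[0] = 0x00 .. addr[5] = 0x55:
 *
 *	high_val = 0x33221100	(bytes 3..0, packed low byte first)
 *	low_val  = 0x5544	(bytes 5..4)
 */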
3120
3121u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3122                                    const u8 *addr)
3123{
3124        u16 high_val = addr[1] | (addr[0] << 8);
3125        struct hclge_dev *hdev = vport->back;
3126        u32 rsh = 4 - hdev->mta_mac_sel_type;
3127        u16 ret_val = (high_val >> rsh) & 0xfff;
3128
3129        return ret_val;
3130}
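/* The MTA index above is a 12-bit slice of the two most significant MAC
 * bytes: high_val holds MAC bits 47..32, and the right shift by
 * (4 - mta_mac_sel_type) picks the window, so sel type 0 yields MAC bits
 * 47..36, type 1 bits 46..35, type 2 bits 45..34 and type 3 bits 44..33.
 */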
3131
3132static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3133                                     enum hclge_mta_dmac_sel_type mta_mac_sel,
3134                                     bool enable)
3135{
3136        struct hclge_mta_filter_mode *req;
3137        struct hclge_desc desc;
3138        int ret;
3139
3140        req = (struct hclge_mta_filter_mode *)desc.data;
3141        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3142
3143        hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3144                     enable);
3145        hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3146                       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3147
3148        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3149        if (ret) {
3150                dev_err(&hdev->pdev->dev,
3151                        "Config mta filter mode failed for cmd_send, ret =%d.\n",
3152                        ret);
3153                return ret;
3154        }
3155
3156        return 0;
3157}
3158
3159int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3160                              u8 func_id,
3161                              bool enable)
3162{
3163        struct hclge_cfg_func_mta_filter *req;
3164        struct hclge_desc desc;
3165        int ret;
3166
3167        req = (struct hclge_cfg_func_mta_filter *)desc.data;
3168        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3169
3170        hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3171                     enable);
3172        req->function_id = func_id;
3173
3174        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3175        if (ret) {
3176                dev_err(&hdev->pdev->dev,
3177                        "Config func_id enable failed for cmd_send, ret =%d.\n",
3178                        ret);
3179                return ret;
3180        }
3181
3182        return 0;
3183}
3184
3185static int hclge_set_mta_table_item(struct hclge_vport *vport,
3186                                    u16 idx,
3187                                    bool enable)
3188{
3189        struct hclge_dev *hdev = vport->back;
3190        struct hclge_cfg_func_mta_item *req;
3191        struct hclge_desc desc;
3192        int ret;
3193
3194        req = (struct hclge_cfg_func_mta_item *)desc.data;
3195        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3196        hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3197
3198        hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3199                       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3200        req->item_idx = cpu_to_le16(req->item_idx);
3201
3202        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3203        if (ret) {
3204                dev_err(&hdev->pdev->dev,
3205                        "Config mta table item failed for cmd_send, ret =%d.\n",
3206                        ret);
3207                return ret;
3208        }
3209
3210        return 0;
3211}
3212
3213static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3214                                     struct hclge_mac_vlan_tbl_entry *req)
3215{
3216        struct hclge_dev *hdev = vport->back;
3217        struct hclge_desc desc;
3218        u8 resp_code;
3219        int ret;
3220
3221        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3222
3223        memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3224
3225        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3226        if (ret) {
3227                dev_err(&hdev->pdev->dev,
3228                        "del mac addr failed for cmd_send, ret =%d.\n",
3229                        ret);
3230                return ret;
3231        }
3232        resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3233
3234        return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code,
3235                                             HCLGE_MAC_VLAN_REMOVE);
3236}
3237
3238static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
3239                                     struct hclge_mac_vlan_tbl_entry *req,
3240                                     struct hclge_desc *desc,
3241                                     bool is_mc)
3242{
3243        struct hclge_dev *hdev = vport->back;
3244        u8 resp_code;
3245        int ret;
3246
3247        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3248        if (is_mc) {
3249                desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3250                memcpy(desc[0].data,
3251                       req,
3252                       sizeof(struct hclge_mac_vlan_tbl_entry));
3253                hclge_cmd_setup_basic_desc(&desc[1],
3254                                           HCLGE_OPC_MAC_VLAN_ADD,
3255                                           true);
3256                desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3257                hclge_cmd_setup_basic_desc(&desc[2],
3258                                           HCLGE_OPC_MAC_VLAN_ADD,
3259                                           true);
3260                ret = hclge_cmd_send(&hdev->hw, desc, 3);
3261        } else {
3262                memcpy(desc[0].data,
3263                       req,
3264                       sizeof(struct hclge_mac_vlan_tbl_entry));
3265                ret = hclge_cmd_send(&hdev->hw, desc, 1);
3266        }
3267        if (ret) {
3268                dev_err(&hdev->pdev->dev,
3269                        "lookup mac addr failed for cmd_send, ret =%d.\n",
3270                        ret);
3271                return ret;
3272        }
3273        resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
3274
3275        return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code,
3276                                             HCLGE_MAC_VLAN_LKUP);
3277}
3278
3279static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
3280                                  struct hclge_mac_vlan_tbl_entry *req,
3281                                  struct hclge_desc *mc_desc)
3282{
3283        struct hclge_dev *hdev = vport->back;
3284        int cfg_status;
3285        u8 resp_code;
3286        int ret;
3287
3288        if (!mc_desc) {
3289                struct hclge_desc desc;
3290
3291                hclge_cmd_setup_basic_desc(&desc,
3292                                           HCLGE_OPC_MAC_VLAN_ADD,
3293                                           false);
3294                memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3295                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3296                resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3297                cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval,
3298                                                           resp_code,
3299                                                           HCLGE_MAC_VLAN_ADD);
3300        } else {
3301                mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3302                mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3303                mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3304                mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3305                mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3306                mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3307                memcpy(mc_desc[0].data, req,
3308                       sizeof(struct hclge_mac_vlan_tbl_entry));
3309                ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
3310                resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
3311                cfg_status = hclge_get_mac_vlan_cmd_status(vport,
3312                                                           mc_desc[0].retval,
3313                                                           resp_code,
3314                                                           HCLGE_MAC_VLAN_ADD);
3315        }
3316
3317        if (ret) {
3318                dev_err(&hdev->pdev->dev,
3319                        "add mac addr failed for cmd_send, ret =%d.\n",
3320                        ret);
3321                return ret;
3322        }
3323
3324        return cfg_status;
3325}
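/* A unicast entry fits in a single descriptor (mc_desc == NULL); a
 * multicast entry also carries the 256-bit function id bitmap handled by
 * hclge_update_desc_vfid(), so it always travels as a chain of three
 * descriptors linked with HCLGE_CMD_FLAG_NEXT, and the response code is
 * taken from the first descriptor in either case.
 */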
3326
3327static int hclge_add_uc_addr(struct hnae3_handle *handle,
3328                             const unsigned char *addr)
3329{
3330        struct hclge_vport *vport = hclge_get_vport(handle);
3331
3332        return hclge_add_uc_addr_common(vport, addr);
3333}
3334
3335int hclge_add_uc_addr_common(struct hclge_vport *vport,
3336                             const unsigned char *addr)
3337{
3338        struct hclge_dev *hdev = vport->back;
3339        struct hclge_mac_vlan_tbl_entry req;
3340        int status;
3341
3342        /* mac addr check */
3343        if (is_zero_ether_addr(addr) ||
3344            is_broadcast_ether_addr(addr) ||
3345            is_multicast_ether_addr(addr)) {
3346                dev_err(&hdev->pdev->dev,
3347                        "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3348                         addr,
3349                         is_zero_ether_addr(addr),
3350                         is_broadcast_ether_addr(addr),
3351                         is_multicast_ether_addr(addr));
3352                return -EINVAL;
3353        }
3354
3355        memset(&req, 0, sizeof(req));
3356        hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3357        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3358        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
3359        hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3360        hnae_set_bit(req.egress_port,
3361                     HCLGE_MAC_EPORT_SW_EN_B, 0);
3362        hnae_set_bit(req.egress_port,
3363                     HCLGE_MAC_EPORT_TYPE_B, 0);
3364        hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M,
3365                       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
3366        hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M,
3367                       HCLGE_MAC_EPORT_PFID_S, 0);
3368        req.egress_port = cpu_to_le16(req.egress_port);
3369
3370        hclge_prepare_mac_addr(&req, addr);
3371
3372        status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
3373
3374        return status;
3375}
3376
3377static int hclge_rm_uc_addr(struct hnae3_handle *handle,
3378                            const unsigned char *addr)
3379{
3380        struct hclge_vport *vport = hclge_get_vport(handle);
3381
3382        return hclge_rm_uc_addr_common(vport, addr);
3383}
3384
3385int hclge_rm_uc_addr_common(struct hclge_vport *vport,
3386                            const unsigned char *addr)
3387{
3388        struct hclge_dev *hdev = vport->back;
3389        struct hclge_mac_vlan_tbl_entry req;
3390        int status;
3391
3392        /* mac addr check */
3393        if (is_zero_ether_addr(addr) ||
3394            is_broadcast_ether_addr(addr) ||
3395            is_multicast_ether_addr(addr)) {
3396                dev_dbg(&hdev->pdev->dev,
3397                        "Remove mac err! invalid mac:%pM.\n",
3398                         addr);
3399                return -EINVAL;
3400        }
3401
3402        memset(&req, 0, sizeof(req));
3403        hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3404        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3405        hclge_prepare_mac_addr(&req, addr);
3406        status = hclge_remove_mac_vlan_tbl(vport, &req);
3407
3408        return status;
3409}
3410
3411static int hclge_add_mc_addr(struct hnae3_handle *handle,
3412                             const unsigned char *addr)
3413{
3414        struct hclge_vport *vport = hclge_get_vport(handle);
3415
3416        return hclge_add_mc_addr_common(vport, addr);
3417}
3418
3419int hclge_add_mc_addr_common(struct hclge_vport *vport,
3420                             const unsigned char *addr)
3421{
3422        struct hclge_dev *hdev = vport->back;
3423        struct hclge_mac_vlan_tbl_entry req;
3424        struct hclge_desc desc[3];
3425        u16 tbl_idx;
3426        int status;
3427
3428        /* mac addr check */
3429        if (!is_multicast_ether_addr(addr)) {
3430                dev_err(&hdev->pdev->dev,
3431                        "Add mc mac err! invalid mac:%pM.\n",
3432                         addr);
3433                return -EINVAL;
3434        }
3435        memset(&req, 0, sizeof(req));
3436        hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3437        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3438        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3439        hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3440        hclge_prepare_mac_addr(&req, addr);
3441        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3442        if (!status) {
3443                /* This mac addr already exists, just update its VFID */
3444                hclge_update_desc_vfid(desc, vport->vport_id, false);
3445                status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3446        } else {
3447                /* This mac addr does not exist, add a new entry for it */
3448                memset(desc[0].data, 0, sizeof(desc[0].data));
3449                memset(desc[1].data, 0, sizeof(desc[1].data));
3450                memset(desc[2].data, 0, sizeof(desc[2].data));
3451                hclge_update_desc_vfid(desc, vport->vport_id, false);
3452                status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3453        }
3454
3455        /* Set MTA table for this MAC address */
3456        tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3457        status = hclge_set_mta_table_item(vport, tbl_idx, true);
3458
3459        return status;
3460}
3461
3462static int hclge_rm_mc_addr(struct hnae3_handle *handle,
3463                            const unsigned char *addr)
3464{
3465        struct hclge_vport *vport = hclge_get_vport(handle);
3466
3467        return hclge_rm_mc_addr_common(vport, addr);
3468}
3469
3470int hclge_rm_mc_addr_common(struct hclge_vport *vport,
3471                            const unsigned char *addr)
3472{
3473        struct hclge_dev *hdev = vport->back;
3474        struct hclge_mac_vlan_tbl_entry req;
3475        int status;
3476        struct hclge_desc desc[3];
3477        u16 tbl_idx;
3478
3479        /* mac addr check */
3480        if (!is_multicast_ether_addr(addr)) {
3481                dev_dbg(&hdev->pdev->dev,
3482                        "Remove mc mac err! invalid mac:%pM.\n",
3483                         addr);
3484                return -EINVAL;
3485        }
3486
3487        memset(&req, 0, sizeof(req));
3488        hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3489        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3490        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3491        hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3492        hclge_prepare_mac_addr(&req, addr);
3493        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3494        if (!status) {
3495                /* This mac addr exists, remove this handle's VFID from it */
3496                hclge_update_desc_vfid(desc, vport->vport_id, true);
3497
3498                if (hclge_is_all_function_id_zero(desc))
3499                        /* All the vfids are zero, so delete this entry */
3500                        status = hclge_remove_mac_vlan_tbl(vport, &req);
3501                else
3502                        /* Not all the vfids are zero, just update the entry */
3503                        status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3504
3505        } else {
3506                /* This mac addr does not exist, so it can't be deleted */
3507                dev_err(&hdev->pdev->dev,
3508                        "Rm multicast mac addr failed, ret = %d.\n",
3509                        status);
3510                return -EIO;
3511        }
3512
3513        /* Clear the MTA table item for this MAC address */
3514        tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3515        status = hclge_set_mta_table_item(vport, tbl_idx, false);
3516
3517        return status;
3518}
3519
3520static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
3521{
3522        struct hclge_vport *vport = hclge_get_vport(handle);
3523        struct hclge_dev *hdev = vport->back;
3524
3525        ether_addr_copy(p, hdev->hw.mac.mac_addr);
3526}
3527
3528static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
3529{
3530        const unsigned char *new_addr = (const unsigned char *)p;
3531        struct hclge_vport *vport = hclge_get_vport(handle);
3532        struct hclge_dev *hdev = vport->back;
3533
3534        /* mac addr check */
3535        if (is_zero_ether_addr(new_addr) ||
3536            is_broadcast_ether_addr(new_addr) ||
3537            is_multicast_ether_addr(new_addr)) {
3538                dev_err(&hdev->pdev->dev,
3539                        "Change uc mac err! invalid mac:%pM.\n",
3540                         new_addr);
3541                return -EINVAL;
3542        }
3543
3544        hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
3545
3546        if (!hclge_add_uc_addr(handle, new_addr)) {
3547                ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
3548                return 0;
3549        }
3550
3551        return -EIO;
3552}
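/* Note: hclge_set_mac_addr() above removes the old unicast entry before
 * programming the new one; if the add step fails, the old entry is not
 * restored, leaving the port without a unicast MAC entry until a later
 * call succeeds.
 */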
3553
3554static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
3555                                      bool filter_en)
3556{
3557        struct hclge_vlan_filter_ctrl *req;
3558        struct hclge_desc desc;
3559        int ret;
3560
3561        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
3562
3563        req = (struct hclge_vlan_filter_ctrl *)desc.data;
3564        req->vlan_type = vlan_type;
3565        req->vlan_fe = filter_en;
3566
3567        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3568        if (ret) {
3569                dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
3570                        ret);
3571                return ret;
3572        }
3573
3574        return 0;
3575}
3576
3577int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
3578                             bool is_kill, u16 vlan, u8 qos, __be16 proto)
3579{
3580#define HCLGE_MAX_VF_BYTES  16
3581        struct hclge_vlan_filter_vf_cfg *req0;
3582        struct hclge_vlan_filter_vf_cfg *req1;
3583        struct hclge_desc desc[2];
3584        u8 vf_byte_val;
3585        u8 vf_byte_off;
3586        int ret;
3587
3588        hclge_cmd_setup_basic_desc(&desc[0],
3589                                   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3590        hclge_cmd_setup_basic_desc(&desc[1],
3591                                   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3592
3593        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3594
3595        vf_byte_off = vfid / 8;
3596        vf_byte_val = 1 << (vfid % 8);
3597
3598        req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data;
3599        req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data;
3600
3601        req0->vlan_id  = vlan;
3602        req0->vlan_cfg = is_kill;
3603
3604        if (vf_byte_off < HCLGE_MAX_VF_BYTES)
3605                req0->vf_bitmap[vf_byte_off] = vf_byte_val;
3606        else
3607                req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
3608
3609        ret = hclge_cmd_send(&hdev->hw, desc, 2);
3610        if (ret) {
3611                dev_err(&hdev->pdev->dev,
3612                        "Send vf vlan command fail, ret =%d.\n",
3613                        ret);
3614                return ret;
3615        }
3616
3617        if (!is_kill) {
3618                if (!req0->resp_code || req0->resp_code == 1)
3619                        return 0;
3620
3621                dev_err(&hdev->pdev->dev,
3622                        "Add vf vlan filter fail, resp_code =%d.\n",
3623                        req0->resp_code);
3624        } else {
3625                if (!req0->resp_code)
3626                        return 0;
3627
3628                dev_err(&hdev->pdev->dev,
3629                        "Kill vf vlan filter fail, resp_code =%d.\n",
3630                        req0->resp_code);
3631        }
3632
3633        return -EIO;
3634}
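/* Worked example for the VF bitmap above: the 256 possible function ids are
 * spread over two descriptors of HCLGE_MAX_VF_BYTES bytes each, 8 ids per
 * byte:
 *
 *	vfid =  10 -> desc[0], vf_bitmap[1] = 1 << 2   (10 / 8 = 1, 10 % 8 = 2)
 *	vfid = 130 -> desc[1], vf_bitmap[0] = 1 << 2   (130 / 8 = 16,
 *							16 - 16 = 0, 130 % 8 = 2)
 */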
3635
3636static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
3637                                      __be16 proto, u16 vlan_id,
3638                                      bool is_kill)
3639{
3640        struct hclge_vport *vport = hclge_get_vport(handle);
3641        struct hclge_dev *hdev = vport->back;
3642        struct hclge_vlan_filter_pf_cfg *req;
3643        struct hclge_desc desc;
3644        u8 vlan_offset_byte_val;
3645        u8 vlan_offset_byte;
3646        u8 vlan_offset_160;
3647        int ret;
3648
3649        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
3650
3651        vlan_offset_160 = vlan_id / 160;
3652        vlan_offset_byte = (vlan_id % 160) / 8;
3653        vlan_offset_byte_val = 1 << (vlan_id % 8);
3654
3655        req = (struct hclge_vlan_filter_pf_cfg *)desc.data;
3656        req->vlan_offset = vlan_offset_160;
3657        req->vlan_cfg = is_kill;
3658        req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
3659
3660        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3661        if (ret) {
3662                dev_err(&hdev->pdev->dev,
3663                        "port vlan command, send fail, ret =%d.\n",
3664                        ret);
3665                return ret;
3666        }
3667
3668        ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
3669        if (ret) {
3670                dev_err(&hdev->pdev->dev,
3671                        "Set pf vlan filter config fail, ret =%d.\n",
3672                        ret);
3673                return ret;
3674        }
3675
3676        return 0;
3677}
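/* Worked example for the port VLAN bitmap above: VLAN ids are grouped 160
 * per descriptor window, 8 per bitmap byte:
 *
 *	vlan_id = 500 -> vlan_offset = 3, bitmap byte 2, bit 4
 *			 (500 / 160 = 3, (500 % 160) / 8 = 2, 500 % 8 = 4)
 */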
3678
3679static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
3680                                    u16 vlan, u8 qos, __be16 proto)
3681{
3682        struct hclge_vport *vport = hclge_get_vport(handle);
3683        struct hclge_dev *hdev = vport->back;
3684
3685        if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
3686                return -EINVAL;
3687        if (proto != htons(ETH_P_8021Q))
3688                return -EPROTONOSUPPORT;
3689
3690        return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
3691}
3692
3693static int hclge_init_vlan_config(struct hclge_dev *hdev)
3694{
3695#define HCLGE_VLAN_TYPE_VF_TABLE   0
3696#define HCLGE_VLAN_TYPE_PORT_TABLE 1
3697        struct hnae3_handle *handle;
3698        int ret;
3699
3700        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
3701                                         true);
3702        if (ret)
3703                return ret;
3704
3705        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
3706                                         true);
3707        if (ret)
3708                return ret;
3709
3710        handle = &hdev->vport[0].nic;
3711        return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
3712}
3713
3714static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
3715{
3716        struct hclge_vport *vport = hclge_get_vport(handle);
3717        struct hclge_config_max_frm_size *req;
3718        struct hclge_dev *hdev = vport->back;
3719        struct hclge_desc desc;
3720        int ret;
3721
3722        if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
3723                return -EINVAL;
3724
3725        hdev->mps = new_mtu;
3726        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
3727
3728        req = (struct hclge_config_max_frm_size *)desc.data;
3729        req->max_frm_size = cpu_to_le16(new_mtu);
3730
3731        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3732        if (ret) {
3733                dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
3734                return ret;
3735        }
3736
3737        return 0;
3738}
3739
3740static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
3741                                    bool enable)
3742{
3743        struct hclge_reset_tqp_queue *req;
3744        struct hclge_desc desc;
3745        int ret;
3746
3747        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
3748
3749        req = (struct hclge_reset_tqp_queue *)desc.data;
3750        req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3751        hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
3752
3753        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3754        if (ret) {
3755                dev_err(&hdev->pdev->dev,
3756                        "Send tqp reset cmd error, status =%d\n", ret);
3757                return ret;
3758        }
3759
3760        return 0;
3761}
3762
3763static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
3764{
3765        struct hclge_reset_tqp_queue *req;
3766        struct hclge_desc desc;
3767        int ret;
3768
3769        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
3770
3771        req = (struct hclge_reset_tqp_queue *)desc.data;
3772        req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3773
3774        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3775        if (ret) {
3776                dev_err(&hdev->pdev->dev,
3777                        "Get reset status error, status =%d\n", ret);
3778                return ret;
3779        }
3780
3781        return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
3782}
3783
3784static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
3785{
3786        struct hclge_vport *vport = hclge_get_vport(handle);
3787        struct hclge_dev *hdev = vport->back;
3788        int reset_try_times = 0;
3789        int reset_status = 0;
3790        int ret;
3791
3792        ret = hclge_tqp_enable(hdev, queue_id, 0, false);
3793        if (ret) {
3794                dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
3795                return;
3796        }
3797
3798        ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
3799        if (ret) {
3800                dev_warn(&hdev->pdev->dev,
3801                         "Send reset tqp cmd fail, ret = %d\n", ret);
3802                return;
3803        }
3804
3806        while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
3807                /* Wait for tqp hw reset */
3808                msleep(20);
3809                reset_status = hclge_get_reset_status(hdev, queue_id);
3810                if (reset_status)
3811                        break;
3812        }
3813
3814        if (!reset_status) {
3815                dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
3816                return;
3817        }
3818
3819        ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
3820        if (ret) {
3821                dev_warn(&hdev->pdev->dev,
3822                         "Deassert the soft reset fail, ret = %d\n", ret);
3823                return;
3824        }
3825}
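/* hclge_reset_tqp() above is a four-step handshake: quiesce the queue via
 * hclge_tqp_enable(), assert the per-queue soft reset, poll the reset
 * status every 20 ms for up to HCLGE_TQP_RESET_TRY_TIMES tries, and only
 * then deassert the reset. All failures are logged but not propagated,
 * since the .reset_queue hook returns void.
 */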
3826
3827static u32 hclge_get_fw_version(struct hnae3_handle *handle)
3828{
3829        struct hclge_vport *vport = hclge_get_vport(handle);
3830        struct hclge_dev *hdev = vport->back;
3831
3832        return hdev->fw_version;
3833}
3834
3835static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
3836                                 u32 *rx_en, u32 *tx_en)
3837{
3838        struct hclge_vport *vport = hclge_get_vport(handle);
3839        struct hclge_dev *hdev = vport->back;
3840
3841        *auto_neg = hclge_get_autoneg(handle);
3842
3843        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
3844                *rx_en = 0;
3845                *tx_en = 0;
3846                return;
3847        }
3848
3849        if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
3850                *rx_en = 1;
3851                *tx_en = 0;
3852        } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
3853                *tx_en = 1;
3854                *rx_en = 0;
3855        } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
3856                *rx_en = 1;
3857                *tx_en = 1;
3858        } else {
3859                *rx_en = 0;
3860                *tx_en = 0;
3861        }
3862}
3863
3864static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
3865                                          u8 *auto_neg, u32 *speed, u8 *duplex)
3866{
3867        struct hclge_vport *vport = hclge_get_vport(handle);
3868        struct hclge_dev *hdev = vport->back;
3869
3870        if (speed)
3871                *speed = hdev->hw.mac.speed;
3872        if (duplex)
3873                *duplex = hdev->hw.mac.duplex;
3874        if (auto_neg)
3875                *auto_neg = hdev->hw.mac.autoneg;
3876}
3877
3878static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
3879{
3880        struct hclge_vport *vport = hclge_get_vport(handle);
3881        struct hclge_dev *hdev = vport->back;
3882
3883        if (media_type)
3884                *media_type = hdev->hw.mac.media_type;
3885}
3886
3887static void hclge_get_mdix_mode(struct hnae3_handle *handle,
3888                                u8 *tp_mdix_ctrl, u8 *tp_mdix)
3889{
3890        struct hclge_vport *vport = hclge_get_vport(handle);
3891        struct hclge_dev *hdev = vport->back;
3892        struct phy_device *phydev = hdev->hw.mac.phydev;
3893        int mdix_ctrl, mdix, retval, is_resolved;
3894
3895        if (!phydev) {
3896                *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3897                *tp_mdix = ETH_TP_MDI_INVALID;
3898                return;
3899        }
3900
3901        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
3902
3903        retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
3904        mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
3905                                   HCLGE_PHY_MDIX_CTRL_S);
3906
3907        retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
3908        mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
3909        is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
3910
3911        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
3912
3913        switch (mdix_ctrl) {
3914        case 0x0:
3915                *tp_mdix_ctrl = ETH_TP_MDI;
3916                break;
3917        case 0x1:
3918                *tp_mdix_ctrl = ETH_TP_MDI_X;
3919                break;
3920        case 0x3:
3921                *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
3922                break;
3923        default:
3924                *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3925                break;
3926        }
3927
3928        if (!is_resolved)
3929                *tp_mdix = ETH_TP_MDI_INVALID;
3930        else if (mdix)
3931                *tp_mdix = ETH_TP_MDI_X;
3932        else
3933                *tp_mdix = ETH_TP_MDI;
3934}
3935
3936static int hclge_init_client_instance(struct hnae3_client *client,
3937                                      struct hnae3_ae_dev *ae_dev)
3938{
3939        struct hclge_dev *hdev = ae_dev->priv;
3940        struct hclge_vport *vport;
3941        int i, ret;
3942
3943        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3944                vport = &hdev->vport[i];
3945
3946                switch (client->type) {
3947                case HNAE3_CLIENT_KNIC:
3949                        hdev->nic_client = client;
3950                        vport->nic.client = client;
3951                        ret = client->ops->init_instance(&vport->nic);
3952                        if (ret)
3953                                goto err;
3954
3955                        if (hdev->roce_client &&
3956                            hnae3_dev_roce_supported(hdev)) {
3957                                struct hnae3_client *rc = hdev->roce_client;
3958
3959                                ret = hclge_init_roce_base_info(vport);
3960                                if (ret)
3961                                        goto err;
3962
3963                                ret = rc->ops->init_instance(&vport->roce);
3964                                if (ret)
3965                                        goto err;
3966                        }
3967
3968                        break;
3969                case HNAE3_CLIENT_UNIC:
3970                        hdev->nic_client = client;
3971                        vport->nic.client = client;
3972
3973                        ret = client->ops->init_instance(&vport->nic);
3974                        if (ret)
3975                                goto err;
3976
3977                        break;
3978                case HNAE3_CLIENT_ROCE:
3979                        if (hnae3_dev_roce_supported(hdev)) {
3980                                hdev->roce_client = client;
3981                                vport->roce.client = client;
3982                        }
3983
3984                        if (hdev->roce_client) {
3985                                ret = hclge_init_roce_base_info(vport);
3986                                if (ret)
3987                                        goto err;
3988
3989                                ret = client->ops->init_instance(&vport->roce);
3990                                if (ret)
3991                                        goto err;
3992                        }
3993                }
3994        }
3995
3996        return 0;
3997err:
3998        return ret;
3999}
4000
4001static void hclge_uninit_client_instance(struct hnae3_client *client,
4002                                         struct hnae3_ae_dev *ae_dev)
4003{
4004        struct hclge_dev *hdev = ae_dev->priv;
4005        struct hclge_vport *vport;
4006        int i;
4007
4008        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4009                vport = &hdev->vport[i];
4010                if (hdev->roce_client)
4011                        hdev->roce_client->ops->uninit_instance(&vport->roce,
4012                                                                0);
4013                if (client->type == HNAE3_CLIENT_ROCE)
4014                        return;
4015                if (client->ops->uninit_instance)
4016                        client->ops->uninit_instance(&vport->nic, 0);
4017        }
4018}
4019
4020static int hclge_pci_init(struct hclge_dev *hdev)
4021{
4022        struct pci_dev *pdev = hdev->pdev;
4023        struct hclge_hw *hw;
4024        int ret;
4025
4026        ret = pci_enable_device(pdev);
4027        if (ret) {
4028                dev_err(&pdev->dev, "failed to enable PCI device\n");
4029                goto err_no_drvdata;
4030        }
4031
4032        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4033        if (ret) {
4034                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4035                if (ret) {
4036                        dev_err(&pdev->dev,
4037                                "can't set consistent PCI DMA\n");
4038                        goto err_disable_device;
4039                }
4040                dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4041        }
4042
4043        ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4044        if (ret) {
4045                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4046                goto err_disable_device;
4047        }
4048
4049        pci_set_master(pdev);
4050        hw = &hdev->hw;
4051        hw->back = hdev;
4052        hw->io_base = pcim_iomap(pdev, 2, 0);
4053        if (!hw->io_base) {
4054                dev_err(&pdev->dev, "Can't map configuration register space\n");
4055                ret = -ENOMEM;
4056                goto err_clr_master;
4057        }
4058
4059        return 0;
4060err_clr_master:
4061        pci_clear_master(pdev);
4062        pci_release_regions(pdev);
4063err_disable_device:
4064        pci_disable_device(pdev);
4065err_no_drvdata:
4066        pci_set_drvdata(pdev, NULL);
4067
4068        return ret;
4069}
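/* Cleanup pairing note: hw->io_base comes from the device-managed
 * pcim_iomap() and is unmapped automatically on driver detach, while
 * pci_enable_device(), pci_set_master() and pci_request_regions() are
 * undone by hand in the error path above and in hclge_pci_uninit().
 */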
4070
4071static void hclge_pci_uninit(struct hclge_dev *hdev)
4072{
4073        struct pci_dev *pdev = hdev->pdev;
4074
4075        if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
4076                pci_disable_msix(pdev);
4077                devm_kfree(&pdev->dev, hdev->msix_entries);
4078                hdev->msix_entries = NULL;
4079        } else {
4080                pci_disable_msi(pdev);
4081        }
4082
4083        pci_clear_master(pdev);
4084        pci_release_regions(pdev);
4085        pci_disable_device(pdev);
4086}
4087
4088static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4089{
4090        struct pci_dev *pdev = ae_dev->pdev;
4091        struct hclge_dev *hdev;
4092        int ret;
4093
4094        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4095        if (!hdev) {
4096                ret = -ENOMEM;
4097                goto err_hclge_dev;
4098        }
4099
4100        hdev->flag |= HCLGE_FLAG_USE_MSIX;
4101        hdev->pdev = pdev;
4102        hdev->ae_dev = ae_dev;
4103        ae_dev->priv = hdev;
4104
4105        ret = hclge_pci_init(hdev);
4106        if (ret) {
4107                dev_err(&pdev->dev, "PCI init failed\n");
4108                goto err_pci_init;
4109        }
4110
4111        /* Command queue initialize */
4112        ret = hclge_cmd_init(hdev);
4113        if (ret)
4114                goto err_cmd_init;
4115
4116        ret = hclge_get_cap(hdev);
4117        if (ret) {
4118                dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4119                        ret);
4120                goto err_cmd_init;
4121        }
4122
4123        ret = hclge_configure(hdev);
4124        if (ret) {
4125                dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4126                goto err_cmd_init;
4127        }
4128
4129        if (hdev->flag & HCLGE_FLAG_USE_MSIX)
4130                ret = hclge_init_msix(hdev);
4131        else
4132                ret = hclge_init_msi(hdev);
4133        if (ret) {
4134                dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
4135                goto err_cmd_init;
4136        }
4137
4138        ret = hclge_alloc_tqps(hdev);
4139        if (ret) {
4140                dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
4141                goto err_cmd_init;
4142        }
4143
4144        ret = hclge_alloc_vport(hdev);
4145        if (ret) {
4146                dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
4147                goto err_cmd_init;
4148        }
4149
4150        ret = hclge_mac_init(hdev);
4151        if (ret) {
4152                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4153                goto err_cmd_init;
4154        }
4155        ret = hclge_buffer_alloc(hdev);
4156        if (ret) {
4157                dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4158                goto err_cmd_init;
4159        }
4160
4161        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4162        if (ret) {
4163                dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4164                goto err_cmd_init;
4165        }
4166
4167        ret = hclge_init_vlan_config(hdev);
4168        if (ret) {
4169                dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4170                goto err_cmd_init;
4171        }
4172
4173        ret = hclge_tm_schd_init(hdev);
4174        if (ret) {
4175                dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4176                goto err_cmd_init;
4177        }
4178
4179        ret = hclge_rss_init_hw(hdev);
4180        if (ret) {
4181                dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4182                goto err_cmd_init;
4183        }
4184
4185        setup_timer(&hdev->service_timer, hclge_service_timer,
4186                    (unsigned long)hdev);
4187        INIT_WORK(&hdev->service_task, hclge_service_task);
4188
4189        set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4190        set_bit(HCLGE_STATE_DOWN, &hdev->state);
4191
4192        pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4193        return 0;
4194
4195err_cmd_init:
4196        pci_release_regions(pdev);
4197err_pci_init:
4198        pci_set_drvdata(pdev, NULL);
4199err_hclge_dev:
4200        return ret;
4201}
4202
4203static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4204{
4205        struct hclge_dev *hdev = ae_dev->priv;
4206        struct hclge_mac *mac = &hdev->hw.mac;
4207
4208        set_bit(HCLGE_STATE_DOWN, &hdev->state);
4209
4210        if (IS_ENABLED(CONFIG_PCI_IOV))
4211                hclge_disable_sriov(hdev);
4212
4213        if (hdev->service_timer.data)
4214                del_timer_sync(&hdev->service_timer);
4215        if (hdev->service_task.func)
4216                cancel_work_sync(&hdev->service_task);
4217
4218        if (mac->phydev)
4219                mdiobus_unregister(mac->mdio_bus);
4220
4221        hclge_destroy_cmd_queue(&hdev->hw);
4222        hclge_pci_uninit(hdev);
4223        ae_dev->priv = NULL;
4224}
4225
4226static const struct hnae3_ae_ops hclge_ops = {
4227        .init_ae_dev = hclge_init_ae_dev,
4228        .uninit_ae_dev = hclge_uninit_ae_dev,
4229        .init_client_instance = hclge_init_client_instance,
4230        .uninit_client_instance = hclge_uninit_client_instance,
4231        .map_ring_to_vector = hclge_map_handle_ring_to_vector,
4232        .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4233        .get_vector = hclge_get_vector,
4234        .set_promisc_mode = hclge_set_promisc_mode,
4235        .start = hclge_ae_start,
4236        .stop = hclge_ae_stop,
4237        .get_status = hclge_get_status,
4238        .get_ksettings_an_result = hclge_get_ksettings_an_result,
4239        .update_speed_duplex_h = hclge_update_speed_duplex_h,
4240        .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
4241        .get_media_type = hclge_get_media_type,
4242        .get_rss_key_size = hclge_get_rss_key_size,
4243        .get_rss_indir_size = hclge_get_rss_indir_size,
4244        .get_rss = hclge_get_rss,
4245        .set_rss = hclge_set_rss,
4246        .get_tc_size = hclge_get_tc_size,
4247        .get_mac_addr = hclge_get_mac_addr,
4248        .set_mac_addr = hclge_set_mac_addr,
4249        .add_uc_addr = hclge_add_uc_addr,
4250        .rm_uc_addr = hclge_rm_uc_addr,
4251        .add_mc_addr = hclge_add_mc_addr,
4252        .rm_mc_addr = hclge_rm_mc_addr,
4253        .set_autoneg = hclge_set_autoneg,
4254        .get_autoneg = hclge_get_autoneg,
4255        .get_pauseparam = hclge_get_pauseparam,
4256        .set_mtu = hclge_set_mtu,
4257        .reset_queue = hclge_reset_tqp,
4258        .get_stats = hclge_get_stats,
4259        .update_stats = hclge_update_stats,
4260        .get_strings = hclge_get_strings,
4261        .get_sset_count = hclge_get_sset_count,
4262        .get_fw_version = hclge_get_fw_version,
4263        .get_mdix_mode = hclge_get_mdix_mode,
4264        .set_vlan_filter = hclge_set_port_vlan_filter,
4265        .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4266};
4267
4268static struct hnae3_ae_algo ae_algo = {
4269        .ops = &hclge_ops,
4270        .name = HCLGE_NAME,
4271        .pdev_id_table = ae_algo_pci_tbl,
4272};
4273
4274static int hclge_init(void)
4275{
4276        pr_info("%s is initializing\n", HCLGE_NAME);
4277
4278        return hnae3_register_ae_algo(&ae_algo);
4279}
4280
4281static void hclge_exit(void)
4282{
4283        hnae3_unregister_ae_algo(&ae_algo);
4284}
4285module_init(hclge_init);
4286module_exit(hclge_exit);
4287
4288MODULE_LICENSE("GPL");
4289MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4290MODULE_DESCRIPTION("HCLGE Driver");
4291MODULE_VERSION(HCLGE_MOD_VERSION);
4292