linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PRI    = 0,
        HCLGE_SHAPER_LVL_PG     = 1,
        HCLGE_SHAPER_LVL_PORT   = 2,
        HCLGE_SHAPER_LVL_QSET   = 3,
        HCLGE_SHAPER_LVL_CNT    = 4,
        HCLGE_SHAPER_LVL_VF     = 0,
        HCLGE_SHAPER_LVL_PF     = 1,
};
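
/* Note: HCLGE_SHAPER_LVL_VF and HCLGE_SHAPER_LVL_PF reuse the values of
 * the priority and PG levels, so the VF/PF shapers share the priority/PG
 * tick values in hclge_shaper_para_calc() below.
 */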

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM    3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD    3

#define HCLGE_SHAPER_BS_U_DEF   5
#define HCLGE_SHAPER_BS_S_DEF   20

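/* maximum shaper rate in Mbps (100000 Mbps == 100 Gbps) */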
#define HCLGE_ETHER_MAX_RATE    100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculated successfully, negative: failed
 */
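/* Worked example (illustrative): request ir = 5000 Mbps at priority
 * level (tick = 6 * 256 = 1536). With ir_b = 126, ir_u = ir_s = 0 the
 * base rate is 126 * 8 * 1000 / 1536 ~= 656 Mbps, which is too slow, so
 * ir_u is raised until the rate reaches the request: ir_u = 3 gives
 * ~5250 Mbps. Since that overshoots, ir_u drops back to 2 and ir_b is
 * recomputed as (5000 * 1536 + 16000) / 32000 = 240 (integer division),
 * giving IR = 240 * 2^2 * 8 / 1536 * 1000 = 5000 Mbps exactly.
 */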
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
                                  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK             (1000 * 8)
#define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)

        const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
                6 * 256,        /* Priority level */
                6 * 32,         /* Priority group level */
                6 * 8,          /* Port level */
                6 * 256         /* Qset level */
        };
        u8 ir_u_calc = 0;
        u8 ir_s_calc = 0;
        u32 ir_calc;
        u32 tick;

        /* Calc tick */
        if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
            ir > HCLGE_ETHER_MAX_RATE)
                return -EINVAL;

        tick = tick_array[shaper_level];

        /* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
         * the formula is changed to:
         *              126 * 1 * 8
         * ir_calc = ---------------- * 1000
         *              tick * 1
         */
        ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

        if (ir_calc == ir) {
                *ir_b = 126;
                *ir_u = 0;
                *ir_s = 0;

                return 0;
        } else if (ir_calc > ir) {
                /* Increasing the denominator to select ir_s value */
                while (ir_calc >= ir && ir) {
                        ir_s_calc++;
                        ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
                }

                *ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
                        DIVISOR_CLK;
        } else {
                /* Increasing the numerator to select ir_u value */
                u32 numerator;

                while (ir_calc < ir) {
                        ir_u_calc++;
                        numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
                        ir_calc = (numerator + (tick >> 1)) / tick;
                }

                if (ir_calc == ir) {
                        *ir_b = 126;
                } else {
                        u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
                        *ir_b = (ir * tick + (denominator >> 1)) / denominator;
                }
        }

        *ir_u = ir_u_calc;
        *ir_s = ir_s_calc;

        return 0;
}

static int hclge_pfc_stats_get(struct hclge_dev *hdev,
                               enum hclge_opcode_type opcode, u64 *stats)
{
        struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
        int ret, i, j;

        if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
              opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
                return -EINVAL;

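        /* Chain the descriptors: every descriptor except the last carries
         * HCLGE_CMD_FLAG_NEXT, so the three reads complete as a single
         * query covering all HCLGE_MAX_TC_NUM per-TC counters.
         */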
        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
                hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
                desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        }

        hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
        if (ret)
                return ret;

        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
                struct hclge_pfc_stats_cmd *pfc_stats =
                                (struct hclge_pfc_stats_cmd *)desc[i].data;

                for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
                        u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

                        if (index < HCLGE_MAX_TC_NUM)
                                stats[index] =
                                        le64_to_cpu(pfc_stats->pkt_num[j]);
                }
        }
        return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

        desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
                (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
                                  u8 pfc_bitmap)
{
        struct hclge_desc desc;
        struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

        pfc->tx_rx_en_bitmap = tx_rx_bitmap;
        pfc->pri_en_bitmap = pfc_bitmap;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
                                 u8 pause_trans_gap, u16 pause_trans_time)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

        ether_addr_copy(pause_param->mac_addr, addr);
        ether_addr_copy(pause_param->mac_addr_extra, addr);
        pause_param->pause_trans_gap = pause_trans_gap;
        pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;
        u16 trans_time;
        u8 trans_gap;
        int ret;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        trans_gap = pause_param->pause_trans_gap;
        trans_time = le16_to_cpu(pause_param->pause_trans_time);

        return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
        u8 tc;

        tc = hdev->tm_info.prio_tc[pri_id];

        if (tc >= hdev->tm_info.num_tc)
                return -EINVAL;

        /* The priority mapping register has four bytes; each byte covers
         * two priorities, with the high 4 bits holding the odd priority
         * and the low 4 bits the even one, as below:
         * first byte:  | pri_1 | pri_0 |
         * second byte: | pri_3 | pri_2 |
         * third byte:  | pri_5 | pri_4 |
         * fourth byte: | pri_7 | pri_6 |
         */
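        /* e.g. with prio_tc = {0, 1, 2, 3, 0, 1, 2, 3} the four bytes
         * above become 0x10, 0x32, 0x10, 0x32.
         */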
        pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

        return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
        struct hclge_desc desc;
        u8 *pri = (u8 *)desc.data;
        u8 pri_id;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

        for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
        }

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u8 pg_id, u8 pri_bit_map)
{
        struct hclge_pg_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

        map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

        map->pg_id = pg_id;
        map->pri_bit_map = pri_bit_map;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u16 qs_id, u8 pri)
{
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

        map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
        map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
                                    u16 q_id, u16 qs_id)
{
        struct hclge_nq_to_qs_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

        map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

        map->nq_id = cpu_to_le16(q_id);
        map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
                                  u8 dwrr)
{
        struct hclge_pg_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

        weight = (struct hclge_pg_weight_cmd *)desc.data;

        weight->pg_id = pg_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
                                   u8 dwrr)
{
        struct hclge_priority_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

        weight = (struct hclge_priority_weight_cmd *)desc.data;

        weight->pri_id = pri_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
                                  u8 dwrr)
{
        struct hclge_qs_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

        weight = (struct hclge_qs_weight_cmd *)desc.data;

        weight->qs_id = cpu_to_le16(qs_id);
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
                                      u8 bs_b, u8 bs_s)
{
        u32 shapping_para = 0;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shapping_para, BS_S, bs_s);

        return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
                                    enum hclge_shap_bucket bucket, u8 pg_id,
                                    u32 shapping_para)
{
        struct hclge_pg_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
                 HCLGE_OPC_TM_PG_C_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

        shap_cfg_cmd->pg_id = pg_id;

        shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_port_shapping_cmd *shap_cfg_cmd;
        struct hclge_desc desc;
        u8 ir_u, ir_b, ir_s;
        u32 shapping_para;
        int ret;

        ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
                                     HCLGE_SHAPER_LVL_PORT,
                                     &ir_b, &ir_u, &ir_s);
        if (ret)
                return ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
        shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

        shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                   HCLGE_SHAPER_BS_U_DEF,
                                                   HCLGE_SHAPER_BS_S_DEF);

        shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
                                     enum hclge_shap_bucket bucket, u8 pri_id,
                                     u32 shapping_para)
{
        struct hclge_pri_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
                 HCLGE_OPC_TM_PRI_C_SHAPPING;

        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

        shap_cfg_cmd->pri_id = pri_id;

        shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

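/* For the scheduling-mode commands below, desc.data[0] carries the node
 * id (pg/pri/qset) and desc.data[1] the mode word: HCLGE_TM_TX_SCHD_DWRR_MSK
 * enables DWRR, while an all-zero word presumably leaves the node in
 * strict-priority mode.
 */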
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

        if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pg_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

        if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pri_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

        if (mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(qs_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
                              u32 bit_map)
{
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
                                   false);

        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

        bp_to_qs_map_cmd->tc_id = tc;
        bp_to_qs_map_cmd->qs_group_id = grp_id;
        bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u16 max_rss_size;
        u8 i;

        /* TC configuration is shared by PF/VF in one port, only allow
         * one TC for a VF for simplicity. A VF's vport_id is non-zero.
         */
        kinfo->num_tc = vport->vport_id ? 1 :
                        min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
        vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
                                (vport->vport_id ? (vport->vport_id - 1) : 0);
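        /* Resulting qset layout: the PF (vport 0) owns qsets [0, num_tc),
         * while VF n (vport_id n) gets the single qset num_tc + (n - 1);
         * e.g. with num_tc = 4: PF -> 0..3, VF1 -> 4, VF2 -> 5.
         */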

        max_rss_size = min_t(u16, hdev->rss_size_max,
                             vport->alloc_tqps / kinfo->num_tc);

        /* Set to user value, no larger than max_rss_size. */
        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size) {
                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
                         kinfo->rss_size, kinfo->req_rss_size);
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
                /* If the user has not set an rss size, compare rss_size
                 * with the number of valid MSI vectors to keep the default
                 * one-to-one mapping between tqp and irq.
                 */
                if (!kinfo->req_rss_size)
                        max_rss_size = min_t(u16, max_rss_size,
                                             (hdev->num_nic_msi - 1) /
                                             kinfo->num_tc);

                /* Set to the maximum specification value (max_rss_size). */
                kinfo->rss_size = max_rss_size;
        }

        kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
        vport->dwrr = 100;  /* 100 percent as init */
        vport->alloc_rss_size = kinfo->rss_size;
        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
               FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_tm_vport_tc_info_update(vport);

                vport++;
        }
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
                hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
                hdev->tm_info.tc_info[i].pgid = 0;
                hdev->tm_info.tc_info[i].bw_limit =
                        hdev->tm_info.pg_info[0].bw_limit;
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;
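        /* e.g. with num_tc = 4 this yields prio_tc = {0, 1, 2, 3, 0, 0, 0, 0}:
         * priorities beyond the enabled TCs fall back to TC 0.
         */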

        /* DCB is enabled if we have more than 1 TC or pfc_en is
         * non-zero.
         */
        if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT      100

        u8 i;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                int k;

                hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

                hdev->tm_info.pg_info[i].pg_id = i;
                hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

                hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

                if (i != 0)
                        continue;

                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
        }
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
                                 "DCB is disabled, but last mode is FC_PFC\n");

                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
                /* fc_mode_last_time records the last fc_mode when
                 * DCB is enabled, so that fc_mode can be set to
                 * the correct value when DCB is disabled.
                 */
                hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
                hdev->tm_info.fc_mode = HCLGE_FC_PFC;
        }
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
        hclge_tm_pg_info_init(hdev);

        hclge_tm_tc_info_init(hdev);

        hclge_tm_vport_info_update(hdev);

        hclge_pfc_info_init(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg mapping */
                ret = hclge_tm_pg_to_pri_map_cfg(
                        hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        u32 shaper_para;
        int ret;
        u32 i;

        /* Cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Cfg shaping para for each pg */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Calc shaper para */
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.pg_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PG,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_C_BUCKET, i,
                                               shaper_para);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_P_BUCKET, i,
                                               shaper_para);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        /* cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Cfg dwrr for each pg */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg dwrr */
                ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
                                   struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_queue **tqp = kinfo->tqp;
        struct hnae3_tc_info *v_tc_info;
        u32 i, j;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                v_tc_info = &kinfo->tc_info[i];
                for (j = 0; j < v_tc_info->tqp_count; j++) {
                        struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

                        ret = hclge_tm_q_to_qs_map_cfg(hdev,
                                                       hclge_get_queue_id(q),
                                                       vport->qs_offset + i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, one by one mapping */
                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hnae3_knic_private_info *kinfo =
                                &vport[k].nic.kinfo;

                        for (i = 0; i < kinfo->num_tc; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, i);
                                if (ret)
                                        return ret;
                        }
                }
        } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
                for (k = 0; k < hdev->num_alloc_vport; k++)
                        for (i = 0; i < HNAE3_MAX_TC; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, k);
                                if (ret)
                                        return ret;
                        }
        } else {
                return -EINVAL;
        }
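
        /* With the qset layout set up in hclge_tm_vport_tc_info_update()
         * and num_tc = 4, the TC-based branch above maps PF qsets 0..3 to
         * pri 0..3 and VF n's single qset (num_tc + n - 1) to pri 0.
         */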

        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        u32 shaper_para;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PRI,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        u8 ir_u, ir_b, ir_s;
        u32 shaper_para;
        int ret;

        ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
                                     &ir_b, &ir_u, &ir_s);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
                                        vport->vport_id, shaper_para);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
                                        vport->vport_id, shaper_para);
        if (ret)
                return ret;

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u8 ir_u, ir_b, ir_s;
        u32 i;
        int ret;

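        /* Note: the qset-level shaper parameters computed below are only
         * validated; no qset shaping command is sent to the hardware here.
         */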
        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_QSET,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        /* Configure the shaper for each vport */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        struct hclge_pg_info *pg_info;
        u8 dwrr;
        int ret;
        u32 i, k;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                dwrr = pg_info->tc_dwrr[i];

                ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
                if (ret)
                        return ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        ret = hclge_tm_qs_weight_cfg(
                                hdev, vport[k].qs_offset + i,
                                vport[k].dwrr);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT       1
#define DEFAULT_TC_OFFSET       14

        struct hclge_ets_tc_weight_cmd *ets_weight;
        struct hclge_desc desc;
        unsigned int i;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
        ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                struct hclge_pg_info *pg_info;

                ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

                if (!(hdev->hw_tc_map & BIT(i)))
                        continue;

                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
        }

        ets_weight->weight_offset = DEFAULT_TC_OFFSET;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        /* Vf dwrr */
        ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
        if (ret)
                return ret;

        /* Qset dwrr */
        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_tm_qs_weight_cfg(
                        hdev, vport->qs_offset + i,
                        hdev->tm_info.pg_info[0].tc_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;

                if (!hnae3_dev_dcb_supported(hdev))
                        return 0;

                ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
                if (ret == -EOPNOTSUPP) {
                        dev_warn(&hdev->pdev->dev,
                                 "fw %08x doesn't support ets tc weight cmd\n",
                                 hdev->fw_version);
                        ret = 0;
                }

                return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_up_to_tc_map(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_to_pri_map(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_port_shaper_cfg(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_shaper_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_dwrr_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
        int ret;
        u8 i;

        /* Only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        if (vport->vport_id >= HNAE3_MAX_TC)
                return -EINVAL;

        ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
        if (ret)
                return ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

                ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
                                                sch_mode);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u8 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                for (i = 0; i < hdev->tm_info.num_tc; i++) {
                        ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
                        if (ret)
                                return ret;

                        for (k = 0; k < hdev->num_alloc_vport; k++) {
                                ret = hclge_tm_qs_schd_mode_cfg(
                                        hdev, vport[k].qs_offset + i,
                                        HCLGE_SCH_MODE_DWRR);
                                if (ret)
                                        return ret;
                        }
                }
        } else {
                for (i = 0; i < hdev->num_alloc_vport; i++) {
                        ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
                        if (ret)
                                return ret;

                        vport++;
                }
        }

        return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
        int ret;

        /* Cfg tm mapping  */
        ret = hclge_tm_map_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg tm shaper */
        ret = hclge_tm_shaper_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg dwrr */
        ret = hclge_tm_dwrr_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg schd mode for each level schd */
        return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        return hclge_pause_param_cfg(hdev, mac->mac_addr,
                                     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
                                     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
        u8 enable_bitmap = 0;

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
                enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
                                HCLGE_RX_MAC_PAUSE_EN_MSK;

        return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
                                      hdev->tm_info.pfc_en);
}

/* Each TC has 1024 queue sets to back-pressure; they are divided into
 * 32 groups of 32 queue sets each, so each group can be represented
 * by a u32 bitmap.
 */
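/* e.g. qs_id 35 falls in group 35 / 32 = 1 as sub-group 35 % 32 = 3, so
 * it is reported as bit 3 of group 1's bitmap.
 */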
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
        int i;

        for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
                u32 qs_bitmap = 0;
                int k, ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hclge_vport *vport = &hdev->vport[k];
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;

                        grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
                                              HCLGE_BP_GRP_ID_S);
                        sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
                                                  HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
                }

                ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
        bool tx_en, rx_en;

        switch (hdev->tm_info.fc_mode) {
        case HCLGE_FC_NONE:
                tx_en = false;
                rx_en = false;
                break;
        case HCLGE_FC_RX_PAUSE:
                tx_en = false;
                rx_en = true;
                break;
        case HCLGE_FC_TX_PAUSE:
                tx_en = true;
                rx_en = false;
                break;
        case HCLGE_FC_FULL:
                tx_en = true;
                rx_en = true;
                break;
        case HCLGE_FC_PFC:
                tx_en = false;
                rx_en = false;
                break;
        default:
                tx_en = true;
                rx_en = true;
        }

        return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
        int ret = 0;
        int i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_bp_setup_hw(hdev, i);
                if (ret)
                        return ret;
        }

        return ret;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        ret = hclge_pause_param_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_mac_pause_setup_hw(hdev);
        if (ret)
                return ret;

        /* Only DCB-supported dev supports qset back pressure and pfc cmd */
        if (!hnae3_dev_dcb_supported(hdev))
                return 0;

        /* The GE MAC does not support PFC. When the driver is initializing
         * and the MAC is in GE mode, ignore the error here; otherwise
         * initialization will fail.
         */
        ret = hclge_pfc_setup_hw(hdev);
        if (init && ret == -EOPNOTSUPP) {
                dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
        } else if (ret) {
                dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
                        ret);
                return ret;
        }

        return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
        struct hclge_vport *vport = hdev->vport;
        struct hnae3_knic_private_info *kinfo;
        u32 i, k;

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                hdev->tm_info.prio_tc[i] = prio_tc[i];

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        kinfo = &vport[k].nic.kinfo;
                        kinfo->prio_tc[i] = prio_tc[i];
                }
        }
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
        u8 bit_map = 0;
        u8 i;

        hdev->tm_info.num_tc = num_tc;

        for (i = 0; i < hdev->tm_info.num_tc; i++)
                bit_map |= BIT(i);

        if (!bit_map) {
                bit_map = 1;
                hdev->tm_info.num_tc = 1;
        }

        hdev->hw_tc_map = bit_map;

        hclge_tm_schd_info_init(hdev);
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
        /* DCB is enabled if we have more than 1 TC or pfc_en is
         * non-zero.
         */
        if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

        hclge_pfc_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
            (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
                return -ENOTSUPP;

        ret = hclge_tm_schd_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_pause_setup_hw(hdev, init);
        if (ret)
                return ret;

        return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
        /* fc_mode is HCLGE_FC_FULL on reset */
        hdev->tm_info.fc_mode = HCLGE_FC_FULL;
        hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
            hdev->tm_info.num_pg != 1)
                return -EINVAL;

        hclge_tm_schd_info_init(hdev);

        return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;

        hclge_tm_vport_tc_info_update(vport);

        ret = hclge_vport_q_to_qs_map(hdev, vport);
        if (ret)
                return ret;

        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
                return 0;

        return hclge_tm_bp_setup(hdev);
}