linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT      100

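/* Translate an IEEE 802.1Qaz ETS configuration from dcbnl into the driver's
 * TM (traffic management) info: strict-priority TCs get SP scheduling with a
 * zero DWRR weight, while ETS TCs use DWRR with the requested per-TC
 * bandwidth percentage.
 */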
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
                                     struct ieee_ets *ets)
{
        u8 i;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        hdev->tm_info.tc_info[i].tc_sch_mode =
                                HCLGE_SCH_MODE_SP;
                        hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        hdev->tm_info.tc_info[i].tc_sch_mode =
                                HCLGE_SCH_MODE_DWRR;
                        hdev->tm_info.pg_info[0].tc_dwrr[i] =
                                ets->tc_tx_bw[i];
                        break;
                default:
                        /* The hardware only supports the SP (strict
                         * priority) and ETS (enhanced transmission
                         * selection) algorithms; if dcbnl hands us any
                         * other value, return an error.
                         */
                        return -EINVAL;
                }
        }

        hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

        return 0;
}

static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
                                      struct ieee_ets *ets)
{
        u32 i;

        memset(ets, 0, sizeof(*ets));
        ets->willing = 1;
        ets->ets_cap = hdev->tc_max;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
                ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

                if (hdev->tm_info.tc_info[i].tc_sch_mode ==
                    HCLGE_SCH_MODE_SP)
                        ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
                else
                        ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
        }
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;

        hclge_tm_info_to_ieee_ets(hdev, ets);

        return 0;
}

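/* Common sanity checks shared by the ETS and mqprio paths: the requested
 * TC count must not exceed the hardware tc_max or the number of TQPs
 * allocated to the PF, and every user priority must map to a valid TC.
 */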
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
                                     u8 *prio_tc)
{
        int i;

        if (num_tc > hdev->tc_max) {
                dev_err(&hdev->pdev->dev,
                        "tc num checking failed, %u > tc_max(%u)\n",
                        num_tc, hdev->tc_max);
                return -EINVAL;
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                if (prio_tc[i] >= num_tc) {
                        dev_err(&hdev->pdev->dev,
                                "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
                                i, prio_tc[i], num_tc);
                        return -EINVAL;
                }
        }

        if (num_tc > hdev->vport[0].alloc_tqps) {
                dev_err(&hdev->pdev->dev,
                        "allocated tqp checking failed, %u > tqp(%u)\n",
                        num_tc, hdev->vport[0].alloc_tqps);
                return -EINVAL;
        }

        return 0;
}

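/* Validate a new ETS configuration before applying it. The TC count is
 * derived from the highest TC referenced by the priority map, the DWRR
 * bandwidth of all ETS TCs must add up to 100 percent, and *changed is
 * set whenever the new settings differ from what is currently in use.
 */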
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
                              u8 *tc, bool *changed)
{
        bool has_ets_tc = false;
        u32 total_ets_bw = 0;
        u8 max_tc = 0;
        int ret;
        u8 i;

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
                        *changed = true;

                if (ets->prio_tc[i] > max_tc)
                        max_tc = ets->prio_tc[i];
        }

        ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
        if (ret)
                return ret;

        for (i = 0; i < hdev->tc_max; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        if (hdev->tm_info.tc_info[i].tc_sch_mode !=
                                HCLGE_SCH_MODE_SP)
                                *changed = true;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        if (hdev->tm_info.tc_info[i].tc_sch_mode !=
                                HCLGE_SCH_MODE_DWRR)
                                *changed = true;

                        total_ets_bw += ets->tc_tx_bw[i];
                        has_ets_tc = true;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (has_ets_tc && total_ets_bw != BW_PERCENT)
                return -EINVAL;

        *tc = max_tc + 1;
        if (*tc != hdev->tm_info.num_tc)
                *changed = true;

        return 0;
}

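/* Re-apply the hardware state that depends on the TC layout after a
 * scheduling change: TM scheduler, pause settings, packet buffer
 * allocation and the RSS indirection table.
 */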
static int hclge_map_update(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_schd_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_pause_setup_hw(hdev, false);
        if (ret)
                return ret;

        ret = hclge_buffer_alloc(hdev);
        if (ret)
                return ret;

        hclge_rss_indir_init_cfg(hdev);

        return hclge_rss_init_hw(hdev);
}

static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
                return ret;

        return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

static int hclge_notify_init_up(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
        if (ret)
                return ret;

        return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

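/* dcbnl ieee_setets hook. When the priority-to-TC mapping changes, the
 * client is brought down and uninitialized, the TM and queue mappings
 * are reprogrammed, and the client is reinitialized and brought back up;
 * otherwise the new scheduling modes and DWRR weights are written
 * without restarting the client.
 */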
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct net_device *netdev = h->kinfo.netdev;
        struct hclge_dev *hdev = vport->back;
        bool map_changed = false;
        u8 num_tc = 0;
        int ret;

        if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
                return -EINVAL;

        ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
        if (ret)
                return ret;

        if (map_changed) {
                netif_dbg(h, drv, netdev, "set ets\n");

                ret = hclge_notify_down_uinit(hdev);
                if (ret)
                        return ret;
        }

        hclge_tm_schd_info_update(hdev, num_tc);

        ret = hclge_ieee_ets_to_tm_info(hdev, ets);
        if (ret)
                goto err_out;

        if (map_changed) {
                ret = hclge_map_update(hdev);
                if (ret)
                        goto err_out;

                ret = hclge_notify_init_up(hdev);
                if (ret)
                        return ret;
        }

        return hclge_tm_dwrr_cfg(hdev);

err_out:
        if (!map_changed)
                return ret;

        hclge_notify_init_up(hdev);

        return ret;
}

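/* dcbnl ieee_getpfc hook. Rebuild the per-priority pfc_en bitmap from the
 * TC-based hw_pfc_map using the current priority-to-TC mapping, and report
 * the per-TC PFC frame counters as requests (tx) and indications (rx).
 */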
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
        u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;
        u8 i, j, pfc_map, *prio_tc;
        int ret;

        memset(pfc, 0, sizeof(*pfc));
        pfc->pfc_cap = hdev->pfc_max;
        prio_tc = hdev->tm_info.prio_tc;
        pfc_map = hdev->tm_info.hw_pfc_map;

        /* Pfc setting is based on TC */
        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
                        if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
                                pfc->pfc_en |= BIT(j);
                }
        }

        ret = hclge_pfc_tx_stats_get(hdev, requests);
        if (ret)
                return ret;

        ret = hclge_pfc_rx_stats_get(hdev, indications);
        if (ret)
                return ret;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                pfc->requests[i] = requests[i];
                pfc->indications[i] = indications[i];
        }
        return 0;
}

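/* dcbnl ieee_setpfc hook. Convert the per-priority pfc_en bitmap into a
 * per-TC hw_pfc_map, reprogram the pause configuration, then briefly
 * bring the client down to reallocate the packet buffers before bringing
 * it back up.
 */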
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct net_device *netdev = h->kinfo.netdev;
        struct hclge_dev *hdev = vport->back;
        u8 i, j, pfc_map, *prio_tc;
        int ret;

        if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
                return -EINVAL;

        if (pfc->pfc_en == hdev->tm_info.pfc_en)
                return 0;

        prio_tc = hdev->tm_info.prio_tc;
        pfc_map = 0;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
                        if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
                                pfc_map |= BIT(i);
                                break;
                        }
                }
        }

        hdev->tm_info.hw_pfc_map = pfc_map;
        hdev->tm_info.pfc_en = pfc->pfc_en;

        netif_dbg(h, drv, netdev,
                  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
                  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

        hclge_tm_pfc_info_update(hdev);

        ret = hclge_pause_setup_hw(hdev, false);
        if (ret)
                return ret;

        ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
                return ret;

        ret = hclge_buffer_alloc(hdev);
        if (ret) {
                hclge_notify_client(hdev, HNAE3_UP_CLIENT);
                return ret;
        }

        return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;

        if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
                return 0;

        return hdev->dcbx_cap;
}

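/* dcbnl setdcbx hook. Only host-managed IEEE mode is supported; requests
 * for LLD_MANAGED or CEE modes are rejected by returning a non-zero
 * value, which dcbnl interprets as a failure to change the mode.
 */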
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct net_device *netdev = h->kinfo.netdev;
        struct hclge_dev *hdev = vport->back;

        netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

        /* No support for LLD_MANAGED modes or CEE */
        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
            (mode & DCB_CAP_DCBX_VER_CEE) ||
            !(mode & DCB_CAP_DCBX_HOST))
                return 1;

        hdev->dcbx_cap = mode;

        return 0;
}

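/* Validate an mqprio offload request: every TC's queue count must be a
 * power of two and no larger than the PF RSS size, queue offsets must be
 * contiguous starting from 0, per-TC rate limiting is not supported, and
 * the total queue count must fit in the TQPs allocated to the PF.
 */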
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
        u16 queue_sum = 0;
        int ret;
        int i;

        if (!mqprio_qopt->qopt.num_tc) {
                mqprio_qopt->qopt.num_tc = 1;
                return 0;
        }

        ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
                                        mqprio_qopt->qopt.prio_tc_map);
        if (ret)
                return ret;

        for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
                if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
                        dev_err(&hdev->pdev->dev,
                                "qopt queue count must be power of 2\n");
                        return -EINVAL;
                }

                if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
                        dev_err(&hdev->pdev->dev,
                                "qopt queue count should be no more than %u\n",
                                hdev->pf_rss_size_max);
                        return -EINVAL;
                }

                if (mqprio_qopt->qopt.offset[i] != queue_sum) {
                        dev_err(&hdev->pdev->dev,
                                "qopt queue offset must start from 0 and be continuous\n");
                        return -EINVAL;
                }

                if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
                        dev_err(&hdev->pdev->dev,
                                "qopt tx_rate is not supported\n");
                        return -EOPNOTSUPP;
                }

                queue_sum = mqprio_qopt->qopt.offset[i];
                queue_sum += mqprio_qopt->qopt.count[i];
        }
        if (hdev->vport[0].alloc_tqps < queue_sum) {
                dev_err(&hdev->pdev->dev,
                        "qopt queue count sum should be no more than %u\n",
                        hdev->vport[0].alloc_tqps);
                return -EINVAL;
        }

        return 0;
}

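/* Copy the validated mqprio parameters into the hnae3_tc_info that the
 * rest of the driver consumes: TC count, priority-to-TC map and per-TC
 * TQP count/offset, plus a bitmap of the TCs actually referenced by the
 * priority map.
 */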
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
        int i;

        memset(tc_info, 0, sizeof(*tc_info));
        tc_info->num_tc = mqprio_qopt->qopt.num_tc;
        memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
               sizeof_field(struct hnae3_tc_info, prio_tc));
        memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
               sizeof_field(struct hnae3_tc_info, tqp_count));
        memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
               sizeof_field(struct hnae3_tc_info, tqp_offset));

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
}

static int hclge_config_tc(struct hclge_dev *hdev,
                           struct hnae3_tc_info *tc_info)
{
        int i;

        hclge_tm_schd_info_update(hdev, tc_info->num_tc);
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

        return hclge_map_update(hdev);
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
                          struct tc_mqprio_qopt_offload *mqprio_qopt)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hnae3_knic_private_info *kinfo;
        struct hclge_dev *hdev = vport->back;
        struct hnae3_tc_info old_tc_info;
        u8 tc = mqprio_qopt->qopt.num_tc;
        int ret;

        /* If the client has been unregistered, the mqprio configuration
         * must not be changed, because uninitializing the rings may
         * then fail.
         */
        if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
                return -EBUSY;

        if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
                return -EINVAL;

        ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed to check mqprio qopt params, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_notify_down_uinit(hdev);
        if (ret)
                return ret;

        kinfo = &vport->nic.kinfo;
        memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
        hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
        kinfo->tc_info.mqprio_active = tc > 0;

        ret = hclge_config_tc(hdev, &kinfo->tc_info);
        if (ret)
                goto err_out;

        hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

        if (tc > 1)
                hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

        return hclge_notify_init_up(hdev);

err_out:
        /* roll-back */
        memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
        if (hclge_config_tc(hdev, &kinfo->tc_info))
                dev_err(&hdev->pdev->dev,
                        "failed to roll back tc configuration\n");

        hclge_notify_init_up(hdev);

        return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
        .ieee_getets    = hclge_ieee_getets,
        .ieee_setets    = hclge_ieee_setets,
        .ieee_getpfc    = hclge_ieee_getpfc,
        .ieee_setpfc    = hclge_ieee_setpfc,
        .getdcbx        = hclge_getdcbx,
        .setdcbx        = hclge_setdcbx,
        .setup_tc       = hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        struct hnae3_knic_private_info *kinfo;

        /* If hdev does not support DCB or the vport is not a PF,
         * then dcb_ops is not set.
         */
        if (!hnae3_dev_dcb_supported(hdev) ||
            vport->vport_id != 0)
                return;

        kinfo = &vport->nic.kinfo;
        kinfo->dcb_ops = &hns3_dcb_ops;
        hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}