linux/net/smc/smc_stats.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * SMC statistics netlink routines
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include "smc_netlink.h"
#include "smc_stats.h"

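/* Allocate the per-net SMC statistics state: the fallback reason array and
 * the per-CPU counter structure. Returns 0 on success or -ENOMEM.
 */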
int smc_stats_init(struct net *net)
{
        net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
        if (!net->smc.fback_rsn)
                goto err_fback;
        net->smc.smc_stats = alloc_percpu(struct smc_stats);
        if (!net->smc.smc_stats)
                goto err_stats;
        mutex_init(&net->smc.mutex_fback_rsn);
        return 0;

err_stats:
        kfree(net->smc.fback_rsn);
err_fback:
        return -ENOMEM;
}

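/* Free the per-net SMC statistics state allocated by smc_stats_init(). */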
void smc_stats_exit(struct net *net)
{
        kfree(net->smc.fback_rsn);
        if (net->smc.smc_stats)
                free_percpu(net->smc.smc_stats);
}

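/* Emit the TX or RX RMB (buffer) counters of one SMC technology as a nested
 * netlink attribute of the given type.
 */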
static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
                                      struct smc_stats *stats, int tech,
                                      int type)
{
        struct smc_stats_rmbcnt *stats_rmb_cnt;
        struct nlattr *attrs;

        if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
                stats_rmb_cnt = &stats->smc[tech].rmb_tx;
        else
                stats_rmb_cnt = &stats->smc[tech].rmb_rx;

        attrs = nla_nest_start(skb, type);
        if (!attrs)
                goto errout;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
                              stats_rmb_cnt->reuse_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
                              stats_rmb_cnt->buf_size_small_peer_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
                              stats_rmb_cnt->buf_size_small_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
                              stats_rmb_cnt->buf_full_peer_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
                              stats_rmb_cnt->buf_full_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
                              stats_rmb_cnt->alloc_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
                              stats_rmb_cnt->dgrade_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

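/* Emit one buffer size histogram (TX/RX payload size or TX/RX RMB size) of
 * one SMC technology as a nested netlink attribute of the given type.
 */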
static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
                                          struct smc_stats *stats, int tech,
                                          int type)
{
        struct smc_stats_memsize *stats_pload;
        struct nlattr *attrs;

        if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
                stats_pload = &stats->smc[tech].tx_pd;
        else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
                stats_pload = &stats->smc[tech].rx_pd;
        else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
                stats_pload = &stats->smc[tech].tx_rmbsize;
        else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
                stats_pload = &stats->smc[tech].rx_rmbsize;
        else
                goto errout;

        attrs = nla_nest_start(skb, type);
        if (!attrs)
                goto errout;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
                              stats_pload->buf[SMC_BUF_8K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
                              stats_pload->buf[SMC_BUF_16K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
                              stats_pload->buf[SMC_BUF_32K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
                              stats_pload->buf[SMC_BUF_64K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
                              stats_pload->buf[SMC_BUF_128K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
                              stats_pload->buf[SMC_BUF_256K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
                              stats_pload->buf[SMC_BUF_512K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
                              stats_pload->buf[SMC_BUF_1024K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
                              stats_pload->buf[SMC_BUF_G_1024K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

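/* Emit all counters of one SMC technology (SMC-D or SMC-R): RMB counters,
 * size histograms and the per-technology connection and I/O counters, nested
 * under SMC_NLA_STATS_SMCD_TECH or SMC_NLA_STATS_SMCR_TECH.
 */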
static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
                                       struct smc_stats *stats, int tech)
{
        struct smc_stats_tech *smc_tech;
        struct nlattr *attrs;

        smc_tech = &stats->smc[tech];
        if (tech == SMC_TYPE_D)
                attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
        else
                attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);

        if (!attrs)
                goto errout;
        if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
                                       SMC_NLA_STATS_T_TX_RMB_STATS))
                goto errattr;
        if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
                                       SMC_NLA_STATS_T_RX_RMB_STATS))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_TXPLOAD_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_RXPLOAD_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_TX_RMB_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_RX_RMB_SIZE))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
                              smc_tech->clnt_v1_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
                              smc_tech->clnt_v2_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
                              smc_tech->srv_v1_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
                              smc_tech->srv_v2_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
                              smc_tech->rx_bytes,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
                              smc_tech->tx_bytes,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
                              smc_tech->rx_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
                              smc_tech->tx_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
                              smc_tech->sendpage_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
                              smc_tech->cork_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
                              smc_tech->ndly_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
                              smc_tech->splice_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
                              smc_tech->urg_data_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

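/* Netlink dump callback for SMC_NETLINK_GET_STATS: sum the per-CPU counters
 * into a temporary struct smc_stats, treated as a flat array of u64, and emit
 * the totals as a single SMC_GEN_STATS message per dump (guarded by pos[0]).
 */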
int smc_nl_get_stats(struct sk_buff *skb,
                     struct netlink_callback *cb)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct smc_stats *stats;
        struct nlattr *attrs;
        int cpu, i, size;
        void *nlh;
        u64 *src;
        u64 *sum;

        if (cb_ctx->pos[0])
                goto errmsg;
        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &smc_gen_nl_family, NLM_F_MULTI,
                          SMC_NETLINK_GET_STATS);
        if (!nlh)
                goto errmsg;

        attrs = nla_nest_start(skb, SMC_GEN_STATS);
        if (!attrs)
                goto errnest;
        stats = kzalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                goto erralloc;
        size = sizeof(*stats) / sizeof(u64);
        for_each_possible_cpu(cpu) {
                src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
                sum = (u64 *)stats;
                for (i = 0; i < size; i++)
                        *(sum++) += *(src++);
        }
        if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
                goto errattr;
        if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
                              stats->clnt_hshake_err_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
                              stats->srv_hshake_err_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        genlmsg_end(skb, nlh);
        cb_ctx->pos[0] = 1;
        kfree(stats);
        return skb->len;

errattr:
        kfree(stats);
erralloc:
        nla_nest_cancel(skb, attrs);
errnest:
        genlmsg_cancel(skb, nlh);
errmsg:
        return skb->len;
}

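/* Emit one fallback reason entry (server or client array slot "pos") as an
 * SMC_GEN_FBACK_STATS message; the total server/client fallback counts are
 * reported only once per dump (tracked in pos[2]). Returns -ENODATA when the
 * slot is unused.
 */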
static int smc_nl_get_fback_details(struct sk_buff *skb,
                                    struct netlink_callback *cb, int pos,
                                    bool is_srv)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        struct net *net = sock_net(skb->sk);
        int cnt_reported = cb_ctx->pos[2];
        struct smc_stats_fback *trgt_arr;
        struct nlattr *attrs;
        int rc = 0;
        void *nlh;

        if (is_srv)
                trgt_arr = &net->smc.fback_rsn->srv[0];
        else
                trgt_arr = &net->smc.fback_rsn->clnt[0];
        if (!trgt_arr[pos].fback_code)
                return -ENODATA;
        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &smc_gen_nl_family, NLM_F_MULTI,
                          SMC_NETLINK_GET_FBACK_STATS);
        if (!nlh)
                goto errmsg;
        attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
        if (!attrs)
                goto errout;
        if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
                goto errattr;
        if (!cnt_reported) {
                if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
                                      net->smc.fback_rsn->srv_fback_cnt,
                                      SMC_NLA_FBACK_STATS_PAD))
                        goto errattr;
                if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
                                      net->smc.fback_rsn->clnt_fback_cnt,
                                      SMC_NLA_FBACK_STATS_PAD))
                        goto errattr;
                cnt_reported = 1;
        }

        if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
                        trgt_arr[pos].fback_code))
                goto errattr;
        if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
                        trgt_arr[pos].count))
                goto errattr;

        cb_ctx->pos[2] = cnt_reported;
        nla_nest_end(skb, attrs);
        genlmsg_end(skb, nlh);
        return rc;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        genlmsg_cancel(skb, nlh);
errmsg:
        return -EMSGSIZE;
}

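/* Netlink dump callback for SMC_NETLINK_GET_FBACK_STATS: walk the server and
 * client fallback reason arrays under mutex_fback_rsn and remember the array
 * position and side in the callback context so the dump can be resumed.
 */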
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        struct net *net = sock_net(skb->sk);
        int rc_srv = 0, rc_clnt = 0, k;
        int skip_serv = cb_ctx->pos[1];
        int snum = cb_ctx->pos[0];
        bool is_srv = true;

        mutex_lock(&net->smc.mutex_fback_rsn);
        for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
                if (k < snum)
                        continue;
                if (!skip_serv) {
                        rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
                        if (rc_srv && rc_srv != -ENODATA)
                                break;
                } else {
                        skip_serv = 0;
                }
                rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
                if (rc_clnt && rc_clnt != -ENODATA) {
                        skip_serv = 1;
                        break;
                }
                if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
                        break;
        }
        mutex_unlock(&net->smc.mutex_fback_rsn);
        cb_ctx->pos[1] = skip_serv;
        cb_ctx->pos[0] = k;
        return skb->len;
}