linux/include/net/gen_stats.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GEN_STATS_H
#define __NET_GEN_STATS_H

#include <linux/gen_stats.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/* Throughput stats.
 * Must be initialized beforehand with gnet_stats_basic_sync_init().
 *
 * If no reads can ever occur parallel to writes (e.g. stack-allocated
 * bstats), then the internal stat values can be written to and read
 * from directly. Otherwise, use _bstats_set/update() for writes and
 * gnet_stats_add_basic() for reads.
 */
struct gnet_stats_basic_sync {
        u64_stats_t bytes;
        u64_stats_t packets;
        struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
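
/* Usage sketch (illustrative, not part of the original header): per the
 * comment above, writers use _bstats_update()/_bstats_set() and readers
 * use gnet_stats_add_basic() whenever reads can run in parallel with
 * writes. The names qdisc_bstats, cpu_bstats and sum are placeholders
 * for caller-owned storage; cpu_bstats may be NULL when there are no
 * per-CPU counters.
 *
 *	struct gnet_stats_basic_sync qdisc_bstats, sum;
 *
 *	gnet_stats_basic_sync_init(&qdisc_bstats);
 *	gnet_stats_basic_sync_init(&sum);
 *
 *	_bstats_update(&qdisc_bstats, skb->len, 1);
 *
 *	gnet_stats_add_basic(&sum, cpu_bstats, &qdisc_bstats, true);
 *
 * The final "running" argument tells gnet_stats_add_basic() whether the
 * counters may still be updated concurrently while it reads them.
 */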

struct net_rate_estimator;

struct gnet_dump {
        spinlock_t *      lock;
        struct sk_buff *  skb;
        struct nlattr *   tail;

        /* Backward compatibility */
        int               compat_tc_stats;
        int               compat_xstats;
        int               padattr;
        void *            xstats;
        int               xstats_len;
        struct tc_stats   tc_stats;
};

void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
                          struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
                                 int tc_stats_type, int xstats_type,
                                 spinlock_t *lock, struct gnet_dump *d,
                                 int padattr);

int gnet_stats_copy_basic(struct gnet_dump *d,
                          struct gnet_stats_basic_sync __percpu *cpu,
                          struct gnet_stats_basic_sync *b, bool running);
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
                          struct gnet_stats_basic_sync __percpu *cpu,
                          struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_basic_hw(struct gnet_dump *d,
                             struct gnet_stats_basic_sync __percpu *cpu,
                             struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
                             struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
                          struct gnet_stats_queue __percpu *cpu_q,
                          struct gnet_stats_queue *q, __u32 qlen);
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
                          const struct gnet_stats_queue __percpu *cpu_q,
                          const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

int gnet_stats_finish_copy(struct gnet_dump *d);

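/* Dump sequence sketch (illustrative; the attribute types and variable
 * names below are examples, modeled on how qdisc statistics are
 * typically exported over rtnetlink): open the nested stats attribute,
 * emit the individual stat blocks, then close it.
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, lock, &d,
 *					 TCA_PAD) < 0)
 *		goto err;
 *	if (gnet_stats_copy_basic(&d, cpu_bstats, &bstats, true) < 0 ||
 *	    gnet_stats_copy_rate_est(&d, &rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, cpu_qstats, &qstats, qlen) < 0)
 *		goto err;
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto err;
 *
 * Each helper returns a negative value on failure, in which case the
 * caller abandons the partially built dump message.
 */
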
int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
                      struct gnet_stats_basic_sync __percpu *cpu_bstats,
                      struct net_rate_estimator __rcu **rate_est,
                      spinlock_t *lock,
                      bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
                          struct gnet_stats_basic_sync __percpu *cpu_bstats,
                          struct net_rate_estimator __rcu **ptr,
                          spinlock_t *lock,
                          bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
                        struct gnet_stats_rate_est64 *sample);
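
/* Rate estimator lifecycle sketch (illustrative; "opt" stands for a
 * TCA_RATE netlink attribute carrying struct tc_estimator, and the
 * other names are placeholders for caller-owned state):
 *
 *	struct net_rate_estimator __rcu *rate_est = NULL;
 *	struct gnet_stats_rate_est64 sample;
 *	int err;
 *
 *	err = gen_new_estimator(&bstats, cpu_bstats, &rate_est,
 *				&stats_lock, true, opt);
 *	if (!err && gen_estimator_read(&rate_est, &sample))
 *		pr_debug("rate: %llu bytes/s\n", sample.bps);
 *	gen_kill_estimator(&rate_est);
 *
 * gen_replace_estimator() takes the same arguments and swaps in a new
 * estimator; gen_estimator_active() reports whether one is currently
 * attached.
 */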
#endif