/* linux/net/core/drop_monitor.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Monitoring code for network dropped packet alerts
   4 *
   5 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/netdevice.h>
  11#include <linux/etherdevice.h>
  12#include <linux/string.h>
  13#include <linux/if_arp.h>
  14#include <linux/inetdevice.h>
  15#include <linux/inet.h>
  16#include <linux/interrupt.h>
  17#include <linux/netpoll.h>
  18#include <linux/sched.h>
  19#include <linux/delay.h>
  20#include <linux/types.h>
  21#include <linux/workqueue.h>
  22#include <linux/netlink.h>
  23#include <linux/net_dropmon.h>
  24#include <linux/percpu.h>
  25#include <linux/timer.h>
  26#include <linux/bitops.h>
  27#include <linux/slab.h>
  28#include <linux/module.h>
  29#include <net/genetlink.h>
  30#include <net/netevent.h>
  31
  32#include <trace/events/skb.h>
  33#include <trace/events/napi.h>
  34
  35#include <asm/unaligned.h>
  36
/* Tracing is a binary toggle: probes attached or not. */
#define TRACE_ON 1
#define TRACE_OFF 0

/*
 * Globals, our netlink socket pointer
 * and the work handle that will send up
 * netlink alerts
 */
static int trace_state = TRACE_OFF;	/* current state; guarded by trace_state_mutex */
static DEFINE_MUTEX(trace_state_mutex);	/* serializes state changes and hw_stats_list writers */
  47
/* Per-CPU state used to batch drop events into one pending alert skb. */
struct per_cpu_dm_data {
	spinlock_t		lock;	/* protects skb; taken with IRQs off */
	struct sk_buff		*skb;	/* pre-formatted alert currently being filled */
	struct work_struct	dm_alert_work;	/* sends the filled alert (send_dm_alert) */
	struct timer_list	send_timer;	/* hysteresis/retry timer (sched_send_work) */
};
  54
/*
 * Per-netdevice tracking entry for hardware drop (rx_dropped) monitoring.
 * Linked on hw_stats_list; readers traverse under RCU, freed via kfree_rcu.
 */
struct dm_hw_stat_delta {
	struct net_device *dev;	/* set to NULL when the device unregisters */
	unsigned long last_rx;	/* jiffies when a drop was last recorded */
	struct list_head list;
	struct rcu_head rcu;
	unsigned long last_drop_val;	/* dev->stats.rx_dropped value last seen */
};
  62
/* Defined at the bottom of the file; needed by reset_per_cpu_data(). */
static struct genl_family net_drop_monitor_family;

/* One pending-alert buffer per CPU; drops are aggregated per CPU. */
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);

static int dm_hit_limit = 64;	/* max distinct drop points per alert message */
static int dm_delay = 1;	/* hysteresis delay (seconds) before sending an alert */
static unsigned long dm_hw_check_delta = 2*HZ;	/* min interval between hw-drop records per dev */
static LIST_HEAD(hw_stats_list);	/* list of dm_hw_stat_delta entries */
  71
/*
 * Allocate a fresh alert skb (genetlink header plus one NLA_UNSPEC
 * attribute holding a zeroed struct net_dm_alert_msg) and swap it into
 * @data->skb under the per-cpu lock.  Returns the previous skb, finalized
 * with genlmsg_end() so the caller can transmit it, or NULL if there was
 * none.  On allocation failure the send timer is re-armed so the reset is
 * retried in ~100ms, and the old skb (if any) is still rotated out.
 */
static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
	size_t al;
	struct net_dm_alert_msg *msg;
	struct nlattr *nla;
	struct sk_buff *skb;
	unsigned long flags;
	void *msg_header;

	/* Room for the alert header, a full set of drop points, and the
	 * attribute header that wraps them.
	 */
	al = sizeof(struct net_dm_alert_msg);
	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
	al += sizeof(struct nlattr);

	skb = genlmsg_new(al, GFP_KERNEL);

	if (!skb)
		goto err;

	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
				 0, NET_DM_CMD_ALERT);
	if (!msg_header) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	/* Only the (empty) alert header is reserved now; drop points are
	 * appended later by trace_drop_common() growing this attribute.
	 */
	nla = nla_reserve(skb, NLA_UNSPEC,
			  sizeof(struct net_dm_alert_msg));
	if (!nla) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	msg = nla_data(nla);
	/* Zero the attribute payload plus the tailroom the future drop
	 * points will occupy (all within the @al bytes allocated above).
	 */
	memset(msg, 0, al);
	goto out;

err:
	mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
	/* Swap in the new (possibly NULL) skb; IRQs off because the lock
	 * is also taken from timer/tracepoint context.
	 */
	spin_lock_irqsave(&data->lock, flags);
	swap(data->skb, skb);
	spin_unlock_irqrestore(&data->lock, flags);

	if (skb) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
		struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);

		genlmsg_end(skb, genlmsg_data(gnlh));
	}

	return skb;
}
 124
/* Multicast group "events": destination of NET_DM_CMD_ALERT messages. */
static const struct genl_multicast_group dropmon_mcgrps[] = {
	{ .name = "events", },
};
 128
 129static void send_dm_alert(struct work_struct *work)
 130{
 131        struct sk_buff *skb;
 132        struct per_cpu_dm_data *data;
 133
 134        data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 135
 136        skb = reset_per_cpu_data(data);
 137
 138        if (skb)
 139                genlmsg_multicast(&net_drop_monitor_family, skb, 0,
 140                                  0, GFP_KERNEL);
 141}
 142
/*
 * This is the timer function to delay the sending of an alert
 * in the event that more drops will arrive during the
 * hysteresis period.
 */
static void sched_send_work(struct timer_list *t)
{
	/* Recover the per-cpu container of the expired timer. */
	struct per_cpu_dm_data *data = from_timer(data, t, send_timer);

	/* Defer the actual send to process context (send_dm_alert). */
	schedule_work(&data->dm_alert_work);
}
 154
/*
 * Record one drop at @location into this CPU's pending alert message.
 * An existing drop point with the same pc has its count bumped; otherwise
 * a new point is appended (up to dm_hit_limit) by growing the message
 * attribute in place, and the hysteresis timer is armed so the alert goes
 * out after dm_delay seconds of aggregation.
 *
 * @skb is unused here; it is part of the kfree_skb tracepoint signature.
 * Runs with IRQs disabled around the per-cpu lock since it may be called
 * from any context the tracepoints fire in.
 */
static void trace_drop_common(struct sk_buff *skb, void *location)
{
	struct net_dm_alert_msg *msg;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
	int i;
	struct sk_buff *dskb;
	struct per_cpu_dm_data *data;
	unsigned long flags;

	local_irq_save(flags);
	data = this_cpu_ptr(&dm_cpu_data);
	spin_lock(&data->lock);
	dskb = data->skb;

	/* No pending skb: a previous allocation failed; the retry timer
	 * armed by reset_per_cpu_data() will provide one later.
	 */
	if (!dskb)
		goto out;

	nlh = (struct nlmsghdr *)dskb->data;
	nla = genlmsg_data(nlmsg_data(nlh));
	msg = nla_data(nla);
	/* Linear scan is fine: at most dm_hit_limit (64) entries. */
	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
			msg->points[i].count++;
			goto out;
		}
	}
	if (msg->entries == dm_hit_limit)
		goto out;
	/*
	 * We need to create a new entry
	 */
	/* Space was pre-allocated by reset_per_cpu_data(); extend both the
	 * skb tail and the wrapping attribute's length to cover it.
	 */
	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
	msg->points[msg->entries].count = 1;
	msg->entries++;

	if (!timer_pending(&data->send_timer)) {
		data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&data->send_timer);
	}

out:
	/* Pairs with local_irq_save() + spin_lock() above. */
	spin_unlock_irqrestore(&data->lock, flags);
}
 201
/* kfree_skb tracepoint probe: record a software drop at @location. */
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
	trace_drop_common(skb, location);
}
 206
/*
 * napi_poll tracepoint probe: detect hardware drops by watching the
 * device's rx_dropped counter.  A hardware drop is reported as a drop
 * point with a NULL pc (trace_drop_common(NULL, NULL)), rate-limited to
 * one record per dm_hw_check_delta per device.
 *
 * NOTE(review): dev->stats.rx_dropped is read without synchronization
 * against the driver updating it — presumably acceptable for a
 * monitoring heuristic; confirm against current upstream.
 */
static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
				int work, int budget)
{
	struct dm_hw_stat_delta *new_stat;

	/*
	 * Don't check napi structures with no associated device
	 */
	if (!napi->dev)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
		/*
		 * only add a note to our monitor buffer if:
		 * 1) this is the dev we received on
		 * 2) its after the last_rx delta
		 * 3) our rx_dropped count has gone up
		 */
		if ((new_stat->dev == napi->dev)  &&
		    (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
		    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
			trace_drop_common(NULL, NULL);
			new_stat->last_drop_val = napi->dev->stats.rx_dropped;
			new_stat->last_rx = jiffies;
			break;
		}
	}
	rcu_read_unlock();
}
 237
 238static int set_all_monitor_traces(int state)
 239{
 240        int rc = 0;
 241        struct dm_hw_stat_delta *new_stat = NULL;
 242        struct dm_hw_stat_delta *temp;
 243
 244        mutex_lock(&trace_state_mutex);
 245
 246        if (state == trace_state) {
 247                rc = -EAGAIN;
 248                goto out_unlock;
 249        }
 250
 251        switch (state) {
 252        case TRACE_ON:
 253                if (!try_module_get(THIS_MODULE)) {
 254                        rc = -ENODEV;
 255                        break;
 256                }
 257
 258                rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
 259                rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
 260                break;
 261
 262        case TRACE_OFF:
 263                rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
 264                rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
 265
 266                tracepoint_synchronize_unregister();
 267
 268                /*
 269                 * Clean the device list
 270                 */
 271                list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
 272                        if (new_stat->dev == NULL) {
 273                                list_del_rcu(&new_stat->list);
 274                                kfree_rcu(new_stat, rcu);
 275                        }
 276                }
 277
 278                module_put(THIS_MODULE);
 279
 280                break;
 281        default:
 282                rc = 1;
 283                break;
 284        }
 285
 286        if (!rc)
 287                trace_state = state;
 288        else
 289                rc = -EINPROGRESS;
 290
 291out_unlock:
 292        mutex_unlock(&trace_state_mutex);
 293
 294        return rc;
 295}
 296
 297
/*
 * NET_DM_CMD_CONFIG handler: runtime reconfiguration is not implemented
 * in this version; the command is always rejected.
 */
static int net_dm_cmd_config(struct sk_buff *skb,
			struct genl_info *info)
{
	return -ENOTSUPP;
}
 303
 304static int net_dm_cmd_trace(struct sk_buff *skb,
 305                        struct genl_info *info)
 306{
 307        switch (info->genlhdr->cmd) {
 308        case NET_DM_CMD_START:
 309                return set_all_monitor_traces(TRACE_ON);
 310        case NET_DM_CMD_STOP:
 311                return set_all_monitor_traces(TRACE_OFF);
 312        }
 313
 314        return -ENOTSUPP;
 315}
 316
/*
 * Netdevice notifier: maintain hw_stats_list.
 *
 * NETDEV_REGISTER allocates a tracking entry for the new device and links
 * it (under trace_state_mutex, RCU-publish) so trace_napi_poll_hit() can
 * find it.  NETDEV_UNREGISTER clears the entry's dev pointer; the entry
 * itself is freed immediately only when tracing is off — otherwise it is
 * left (dev == NULL) for set_all_monitor_traces(TRACE_OFF) to reap after
 * the probes are quiesced.
 */
static int dropmon_net_event(struct notifier_block *ev_block,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dm_hw_stat_delta *new_stat = NULL;
	struct dm_hw_stat_delta *tmp;

	switch (event) {
	case NETDEV_REGISTER:
		new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);

		/* Allocation failure: device simply goes unmonitored. */
		if (!new_stat)
			goto out;

		new_stat->dev = dev;
		new_stat->last_rx = jiffies;
		mutex_lock(&trace_state_mutex);
		list_add_rcu(&new_stat->list, &hw_stats_list);
		mutex_unlock(&trace_state_mutex);
		break;
	case NETDEV_UNREGISTER:
		mutex_lock(&trace_state_mutex);
		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
			if (new_stat->dev == dev) {
				new_stat->dev = NULL;
				if (trace_state == TRACE_OFF) {
					list_del_rcu(&new_stat->list);
					kfree_rcu(new_stat, rcu);
					break;
				}
			}
		}
		mutex_unlock(&trace_state_mutex);
		break;
	}
out:
	/* Never veto the netdev event; monitoring is best-effort. */
	return NOTIFY_DONE;
}
 355
/*
 * Generic netlink command table.  Strict/dump validation is disabled for
 * all commands since this family predates strict policy checking.
 */
static const struct genl_ops dropmon_ops[] = {
	{
		.cmd = NET_DM_CMD_CONFIG,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_config,
	},
	{
		.cmd = NET_DM_CMD_START,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_STOP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
};
 373
/* The "NET_DM" generic netlink family; registered in module init. */
static struct genl_family net_drop_monitor_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= "NET_DM",
	.version	= 2,
	.module		= THIS_MODULE,
	.ops		= dropmon_ops,
	.n_ops		= ARRAY_SIZE(dropmon_ops),
	.mcgrps		= dropmon_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(dropmon_mcgrps),
};
 384
/* Netdevice register/unregister notifications feed dropmon_net_event(). */
static struct notifier_block dropmon_net_notifier = {
	.notifier_call = dropmon_net_event
};
 388
/*
 * Module init: register the generic netlink family and the netdevice
 * notifier, then set up per-CPU alert state (work item, timer, lock and
 * an initial pre-formatted alert skb).  Tracepoints are NOT attached
 * here — they are enabled on demand via NET_DM_CMD_START, so the late
 * per-CPU initialization cannot race with drop recording.
 */
static int __init init_net_drop_monitor(void)
{
	struct per_cpu_dm_data *data;
	int cpu, rc;

	pr_info("Initializing network drop monitor service\n");

	/* Drop points store the pc in a fixed-size field; refuse to run if
	 * a pointer would not fit (assumes the pc slot is 8 bytes — see
	 * struct net_dm_drop_point in the uapi header).
	 */
	if (sizeof(void *) > 8) {
		pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
		return -ENOSPC;
	}

	rc = genl_register_family(&net_drop_monitor_family);
	if (rc) {
		pr_err("Could not create drop monitor netlink family\n");
		return rc;
	}
	/* The "events" group id is part of the ABI (NET_DM_GRP_ALERT). */
	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);

	rc = register_netdevice_notifier(&dropmon_net_notifier);
	if (rc < 0) {
		pr_crit("Failed to register netdevice notifier\n");
		goto out_unreg;
	}

	rc = 0;

	for_each_possible_cpu(cpu) {
		data = &per_cpu(dm_cpu_data, cpu);
		INIT_WORK(&data->dm_alert_work, send_dm_alert);
		timer_setup(&data->send_timer, sched_send_work, 0);
		spin_lock_init(&data->lock);
		/* Prime data->skb; on allocation failure this arms the
		 * retry timer rather than failing module load.
		 */
		reset_per_cpu_data(data);
	}


	goto out;

out_unreg:
	genl_unregister_family(&net_drop_monitor_family);
out:
	return rc;
}
 432
/*
 * Module exit: tear down in the reverse order of init.  The module
 * reference taken while tracing is on guarantees no probes are attached
 * when we get here, so only stray timers/work remain to be quiesced.
 */
static void exit_net_drop_monitor(void)
{
	struct per_cpu_dm_data *data;
	int cpu;

	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));

	/*
	 * Because of the module_get/put we do in the trace state change path
	 * we are guarnateed not to have any current users when we get here
	 * all we need to do is make sure that we don't have any running timers
	 * or pending schedule calls
	 */

	for_each_possible_cpu(cpu) {
		data = &per_cpu(dm_cpu_data, cpu);
		/* NOTE(review): a work item cancelled below could in theory
		 * re-arm the timer via reset_per_cpu_data()'s error path
		 * after del_timer_sync() — presumed unreachable with tracing
		 * off; confirm against current upstream ordering.
		 */
		del_timer_sync(&data->send_timer);
		cancel_work_sync(&data->dm_alert_work);
		/*
		 * At this point, we should have exclusive access
		 * to this struct and can free the skb inside it
		 */
		kfree_skb(data->skb);
	}

	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
}
 460
 461module_init(init_net_drop_monitor);
 462module_exit(exit_net_drop_monitor);
 463
 464MODULE_LICENSE("GPL v2");
 465MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
 466MODULE_ALIAS_GENL_FAMILY("NET_DM");
 467