linux/net/core/netpoll.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL   50

#define MAX_SKB_SIZE                                                    \
        (sizeof(struct ethhdr) +                                        \
         sizeof(struct iphdr) +                                         \
         sizeof(struct udphdr) +                                        \
         MAX_UDP_CHUNK)

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)                           \
        pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)                            \
        pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)                         \
        pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

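/* Hand one skb to the driver on the given tx queue.  If the skb carries a
 * VLAN tag that the device cannot offload, the tag is pushed into the
 * packet data before transmission.
 */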
static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev,
                                      struct netdev_queue *txq)
{
        netdev_tx_t status = NETDEV_TX_OK;
        netdev_features_t features;

        features = netif_skb_features(skb);

        if (skb_vlan_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto)) {
                skb = __vlan_hwaccel_push_inside(skb);
                if (unlikely(!skb)) {
                        /* This is actually a packet drop, but we
                         * don't want the code that calls this
                         * function to try and operate on a NULL skb.
                         */
                        goto out;
                }
        }

        status = netdev_start_xmit(skb, dev, txq, false);

out:
        return status;
}

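/* Delayed-work handler that drains the netpoll tx backlog.  Packets that
 * still cannot be sent (queue frozen/stopped or driver busy) are requeued
 * at the head and the work is rescheduled to retry shortly.
 */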
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                struct netdev_queue *txq;
                unsigned int q_index;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        kfree_skb(skb);
                        continue;
                }

                local_irq_save(flags);
                /* check if skb->queue_mapping is still valid */
                q_index = skb_get_queue_mapping(skb);
                if (unlikely(q_index >= dev->real_num_tx_queues)) {
                        q_index = q_index % dev->real_num_tx_queues;
                        skb_set_queue_mapping(skb, q_index);
                }
                txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
                        skb_queue_head(&npinfo->txq, skb);
                        HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                HARD_TX_UNLOCK(dev, txq);
                local_irq_restore(flags);
        }
}

static void poll_one_napi(struct napi_struct *napi)
{
        int work;

        /* If we set this bit but see that it has already been set,
         * that indicates that napi has been disabled and we need
         * to abort this operation
         */
        if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
                return;

        /* We explicitly pass the polling call a budget of 0 to
         * indicate that we are clearing the Tx path only.
         */
        work = napi->poll(napi, 0);
        WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
        trace_napi_poll(napi, work, 0);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

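/* Poll every NAPI instance registered on the device, skipping any that is
 * currently owned by another CPU.
 */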
static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int cpu = smp_processor_id();

        list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
                if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
                        poll_one_napi(napi);
                        smp_store_release(&napi->poll_owner, -1);
                }
        }
}

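/* Service a device from netpoll context: run its poll controller (if it
 * has one) and its NAPI handlers with a zero budget so pending tx
 * completions are reaped even while interrupts are disabled.
 */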
void netpoll_poll_dev(struct net_device *dev)
{
        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
        const struct net_device_ops *ops;

        /* Don't do any rx activity if the dev_lock mutex is held
         * the dev_open/close paths use this to block netpoll activity
         * while changing device state
         */
        if (!ni || down_trylock(&ni->dev_lock))
                return;

        if (!netif_running(dev)) {
                up(&ni->dev_lock);
                return;
        }

        ops = dev->netdev_ops;
        if (ops->ndo_poll_controller)
                ops->ndo_poll_controller(dev);

        poll_napi(dev);

        up(&ni->dev_lock);

        zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

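/* Block netpoll activity on the device until netpoll_poll_enable() is
 * called; the dev_open/close paths use this while changing device state.
 */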
void netpoll_poll_disable(struct net_device *dev)
{
        struct netpoll_info *ni;
        int idx;
        might_sleep();
        idx = srcu_read_lock(&netpoll_srcu);
        ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
        if (ni)
                down(&ni->dev_lock);
        srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
        struct netpoll_info *ni;
        rcu_read_lock();
        ni = rcu_dereference(dev->npinfo);
        if (ni)
                up(&ni->dev_lock);
        rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

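/* Top up the emergency skb pool to MAX_SKBS entries so that messages can
 * still be assembled when regular allocation fails under memory pressure.
 */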
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

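/* Free the skbs sitting on this CPU's tx completion queue.  Skbs that are
 * not safe to free from hard-IRQ context are handed back via
 * dev_kfree_skb_any() instead.
 */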
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (!skb_irq_freeable(skb)) {
                                refcount_set(&skb->users, 1);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
                        }
                }
        }

        put_cpu_var(softnet_data);
}

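/* Allocate an skb for a netpoll message: try a fresh atomic allocation,
 * fall back to the pre-filled pool, and as a last resort poll the device
 * a few times to let tx completions free up memory before giving up.
 */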
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll_dev(np->dev);
                        goto repeat;
                }
                return NULL;
        }

        refcount_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

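/* Return non-zero if the current CPU already owns one of the device's
 * NAPI instances; used to avoid recursing into the tx path from poll
 * context.
 */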
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        netdev_tx_t status = NETDEV_TX_BUSY;
        struct net_device *dev;
        unsigned long tries;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;

        lockdep_assert_irqs_disabled();

        dev = np->dev;
        npinfo = rcu_dereference_bh(dev->npinfo);

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                dev_kfree_skb_irq(skb);
                return NET_XMIT_DROP;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;

                txq = netdev_core_pick_tx(dev, skb, NULL);

                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (HARD_TX_TRYLOCK(dev, txq)) {
                                if (!netif_xmit_stopped(txq))
                                        status = netpoll_start_xmit(skb, dev, txq);

                                HARD_TX_UNLOCK(dev, txq);

                                if (dev_xmit_complete(status))
                                        break;

                        }

                        /* tickle device maybe there is some cleanup */
                        netpoll_poll_dev(np->dev);

                        udelay(USEC_PER_POLL);
                }

                WARN_ONCE(!irqs_disabled(),
                        "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
                        dev->name, dev->netdev_ops->ndo_start_xmit);

        }

        if (!dev_xmit_complete(status)) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
        return NETDEV_TX_OK;
}

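/* Transmit an skb via netpoll with local interrupts disabled.  Packets
 * that cannot be sent immediately are queued for the tx worker.
 */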
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        unsigned long flags;
        netdev_tx_t ret;

        if (unlikely(!np)) {
                dev_kfree_skb_irq(skb);
                ret = NET_XMIT_DROP;
        } else {
                local_irq_save(flags);
                ret = __netpoll_send_skb(np, skb);
                local_irq_restore(flags);
        }
        return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);

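/* Build a complete UDP/IPv4-or-IPv6/Ethernet frame around 'msg' using the
 * addresses configured in 'np' and send it through netpoll_send_skb().
 */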
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
        static atomic_t ip_ident;
        struct ipv6hdr *ip6h;

        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                WARN_ON_ONCE(!irqs_disabled());

        udp_len = len + sizeof(*udph);
        if (np->ipv6)
                ip_len = udp_len + sizeof(*ip6h);
        else
                ip_len = udp_len + sizeof(*iph);

        total_len = ip_len + LL_RESERVED_SPACE(np->dev);

        skb = find_skb(np, total_len + np->dev->needed_tailroom,
                       total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb_put(skb, len);

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);

        if (np->ipv6) {
                udph->check = 0;
                udph->check = csum_ipv6_magic(&np->local_ip.in6,
                                              &np->remote_ip.in6,
                                              udp_len, IPPROTO_UDP,
                                              csum_partial(udph, udp_len, 0));
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;

                skb_push(skb, sizeof(*ip6h));
                skb_reset_network_header(skb);
                ip6h = ipv6_hdr(skb);

                /* ip6h->version = 6; ip6h->priority = 0; */
                *(unsigned char *)ip6h = 0x60;
                ip6h->flow_lbl[0] = 0;
                ip6h->flow_lbl[1] = 0;
                ip6h->flow_lbl[2] = 0;

                ip6h->payload_len = htons(sizeof(struct udphdr) + len);
                ip6h->nexthdr = IPPROTO_UDP;
                ip6h->hop_limit = 32;
                ip6h->saddr = np->local_ip.in6;
                ip6h->daddr = np->remote_ip.in6;

                eth = skb_push(skb, ETH_HLEN);
                skb_reset_mac_header(skb);
                skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
        } else {
                udph->check = 0;
                udph->check = csum_tcpudp_magic(np->local_ip.ip,
                                                np->remote_ip.ip,
                                                udp_len, IPPROTO_UDP,
                                                csum_partial(udph, udp_len, 0));
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;

                skb_push(skb, sizeof(*iph));
                skb_reset_network_header(skb);
                iph = ip_hdr(skb);

                /* iph->version = 4; iph->ihl = 5; */
                *(unsigned char *)iph = 0x45;
                iph->tos      = 0;
                put_unaligned(htons(ip_len), &(iph->tot_len));
                iph->id       = htons(atomic_inc_return(&ip_ident));
                iph->frag_off = 0;
                iph->ttl      = 64;
                iph->protocol = IPPROTO_UDP;
                iph->check    = 0;
                put_unaligned(np->local_ip.ip, &(iph->saddr));
                put_unaligned(np->remote_ip.ip, &(iph->daddr));
                iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

                eth = skb_push(skb, ETH_HLEN);
                skb_reset_mac_header(skb);
                skb->protocol = eth->h_proto = htons(ETH_P_IP);
        }

        ether_addr_copy(eth->h_source, np->dev->dev_addr);
        ether_addr_copy(eth->h_dest, np->remote_mac);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

void netpoll_print_options(struct netpoll *np)
{
        np_info(np, "local port %d\n", np->local_port);
        if (np->ipv6)
                np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
        else
                np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
        np_info(np, "interface '%s'\n", np->dev_name);
        np_info(np, "remote port %d\n", np->remote_port);
        if (np->ipv6)
                np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
        else
                np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
        np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

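/* Parse an IP address string: returns 0 for IPv4, 1 for IPv6 (when IPv6
 * support is built in) and -1 on failure.
 */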
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
        const char *end;

        if (!strchr(str, ':') &&
            in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
                if (!*end)
                        return 0;
        }
        if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
                if (!*end)
                        return 1;
#else
                return -1;
#endif
        }
        return -1;
}

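/* Parse a netconsole-style configuration string of the form
 *   [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 * for example "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55".
 * Fields left empty keep their current or default values.
 */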
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;
        int ipv6;
        bool ipversion_set = false;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (kstrtou16(cur, 10, &np->local_port))
                        goto parse_failed;
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                ipversion_set = true;
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
                if (ipv6 < 0)
                        goto parse_failed;
                else
                        np->ipv6 = (bool)ipv6;
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (*cur == ' ' || *cur == '\t')
                        np_info(np, "warning: whitespace is not allowed\n");
                if (kstrtou16(cur, 10, &np->remote_port))
                        goto parse_failed;
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
        if (ipv6 < 0)
                goto parse_failed;
        else if (ipversion_set && np->ipv6 != (bool)ipv6)
                goto parse_failed;
        else
                np->ipv6 = (bool)ipv6;
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if (!mac_pton(cur, np->remote_mac))
                        goto parse_failed;
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        np_info(np, "couldn't parse config at '%s'!\n", cur);
        return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

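/* Attach 'np' to a device that is already up: allocate or reuse the
 * per-device netpoll_info, let the driver run ndo_netpoll_setup, then
 * publish npinfo with RCU.  Caller must hold RTNL.
 */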
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
        int err;

        np->dev = ndev;
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

        if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
                np_err(np, "%s doesn't support polling, aborting\n",
                       np->dev_name);
                err = -ENOTSUPP;
                goto out;
        }

        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto out;
                }

                sema_init(&npinfo->dev_lock, 1);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                refcount_set(&npinfo->refcnt, 1);

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_setup) {
                        err = ops->ndo_netpoll_setup(ndev, npinfo);
                        if (err)
                                goto free_npinfo;
                }
        } else {
                npinfo = rtnl_dereference(ndev->npinfo);
                refcount_inc(&npinfo->refcnt);
        }

        npinfo->netpoll = np;

        /* last thing to do is link it to the net device structure */
        rcu_assign_pointer(ndev->npinfo, npinfo);

        return 0;

free_npinfo:
        kfree(npinfo);
out:
        return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

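/* Full setup path: look up the interface by name, bring it up and wait
 * for carrier if needed, pick a local address when none was given,
 * pre-fill the skb pool and attach via __netpoll_setup().
 */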
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        int err;

        rtnl_lock();
        if (np->dev_name[0]) {
                struct net *net = current->nsproxy->net_ns;
                ndev = __dev_get_by_name(net, np->dev_name);
        }
        if (!ndev) {
                np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
                err = -ENODEV;
                goto unlock;
        }
        dev_hold(ndev);

        if (netdev_master_upper_dev_get(ndev)) {
                np_err(np, "%s is a slave device, aborting\n", np->dev_name);
                err = -EBUSY;
                goto put;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

                err = dev_open(ndev, NULL);

                if (err) {
                        np_err(np, "failed to open %s\n", ndev->name);
                        goto put;
                }

                rtnl_unlock();
                atleast = jiffies + HZ/10;
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                np_notice(np, "timeout waiting for carrier\n");
                                break;
                        }
                        msleep(1);
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
                        msleep(4000);
                }
                rtnl_lock();
        }

        if (!np->local_ip.ip) {
                if (!np->ipv6) {
                        const struct in_ifaddr *ifa;

                        in_dev = __in_dev_get_rtnl(ndev);
                        if (!in_dev)
                                goto put_noaddr;

                        ifa = rtnl_dereference(in_dev->ifa_list);
                        if (!ifa) {
put_noaddr:
                                np_err(np, "no IP address for %s, aborting\n",
                                       np->dev_name);
                                err = -EDESTADDRREQ;
                                goto put;
                        }

                        np->local_ip.ip = ifa->ifa_local;
                        np_info(np, "local IP %pI4\n", &np->local_ip.ip);
                } else {
#if IS_ENABLED(CONFIG_IPV6)
                        struct inet6_dev *idev;

                        err = -EDESTADDRREQ;
                        idev = __in6_dev_get(ndev);
                        if (idev) {
                                struct inet6_ifaddr *ifp;

                                read_lock_bh(&idev->lock);
                                list_for_each_entry(ifp, &idev->addr_list, if_list) {
                                        if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
                                            !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
                                                continue;
                                        np->local_ip.in6 = ifp->addr;
                                        err = 0;
                                        break;
                                }
                                read_unlock_bh(&idev->lock);
                        }
                        if (err) {
                                np_err(np, "no IPv6 address for %s, aborting\n",
                                       np->dev_name);
                                goto put;
                        } else
                                np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
                        np_err(np, "IPv6 is not supported %s, aborting\n",
                               np->dev_name);
                        err = -EINVAL;
                        goto put;
#endif
                }
        }

        /* fill up the skb queue */
        refill_skbs();

        err = __netpoll_setup(np, ndev);
        if (err)
                goto put;

        rtnl_unlock();
        return 0;

put:
        dev_put(ndev);
unlock:
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
        struct netpoll_info *npinfo =
                        container_of(rcu_head, struct netpoll_info, rcu);

        skb_queue_purge(&npinfo->txq);

        /* we can't call cancel_delayed_work_sync here, as we are in softirq */
        cancel_delayed_work(&npinfo->tx_work);

        /* clean after last, unfinished work */
        __skb_queue_purge(&npinfo->txq);
        /* now cancel it again */
        cancel_delayed_work(&npinfo->tx_work);
        kfree(npinfo);
}

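/* Detach 'np' from its device: wait for in-flight netpoll_poll_disable()
 * readers, drop the npinfo reference and, on the last one, free the tx
 * queue and npinfo from an RCU callback.  Caller must hold RTNL.
 */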
void __netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;

        npinfo = rtnl_dereference(np->dev->npinfo);
        if (!npinfo)
                return;

        synchronize_srcu(&netpoll_srcu);

        if (refcount_dec_and_test(&npinfo->refcnt)) {
                const struct net_device_ops *ops;

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);

                RCU_INIT_POINTER(np->dev->npinfo, NULL);
                call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
        } else
                RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
        ASSERT_RTNL();

        /* Wait for transmitting packets to finish before freeing. */
        synchronize_rcu();
        __netpoll_cleanup(np);
        kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
        rtnl_lock();
        if (!np->dev)
                goto out;
        __netpoll_cleanup(np);
        dev_put(np->dev);
        np->dev = NULL;
out:
        rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);