linux/net/batman-adv/send.c
/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the destination MAC address of the packet
 *
 * Send out an already prepared packet to the given destination address via
 * the specified interface. The ethernet header is built with the address of
 * @hard_iface as source and @dst_addr as destination before the packet is
 * handed to the underlying network device.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const u8 *dst_addr)
{
        struct batadv_priv *bat_priv;
        struct ethhdr *ethhdr;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = eth_hdr(skb);
        ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
        ether_addr_copy(ethhdr->h_dest, dst_addr);
        ethhdr->h_proto = htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->protocol = htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* Save a clone of the skb to use when decoding coded packets */
        batadv_nc_skb_store_for_decoding(bat_priv, skb);

        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

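/**
 * batadv_send_broadcast_skb - send a packet to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Send out an already prepared packet to the broadcast address using the
 * specified interface.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */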
int batadv_send_broadcast_skb(struct sk_buff *skb,
                              struct batadv_hard_iface *hard_iface)
{
        return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

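/**
 * batadv_send_unicast_skb - send a packet to a neighbor as unicast
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Send out an already prepared packet to the given neighbor using the
 * interface this neighbor was detected on. When B.A.T.M.A.N. V is compiled
 * in, the time of the last successful unicast transmission is recorded for
 * this neighbor.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */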
int batadv_send_unicast_skb(struct sk_buff *skb,
                            struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
        struct batadv_hardif_neigh_node *hardif_neigh;
#endif
        int ret;

        ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
        hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

        if (hardif_neigh && ret != NET_XMIT_DROP)
                hardif_neigh->bat_v.last_unicast_tx = jiffies;

        if (hardif_neigh)
                batadv_hardif_neigh_put(hardif_neigh);
#endif

        return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
                            struct batadv_orig_node *orig_node,
                            struct batadv_hard_iface *recv_if)
{
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;
        int ret = NET_XMIT_DROP;

        /* batadv_find_router() increases the neigh_node's refcount if found */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
                goto out;

        /* Check if the skb is too large to send in one piece and fragment
         * it if needed.
         */
        if (atomic_read(&bat_priv->fragmentation) &&
            skb->len > neigh_node->if_incoming->net_dev->mtu) {
                /* Fragment and send packet. */
                if (batadv_frag_send_packet(skb, orig_node, neigh_node))
                        ret = NET_XMIT_SUCCESS;

                goto out;
        }

        /* try to network code the packet, if it is received on an interface
         * (i.e. being forwarded). If the packet originates from this node or
         * if network coding fails, then send the packet as usual.
         */
        if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
                ret = NET_XMIT_POLICED;
        } else {
                batadv_send_unicast_skb(skb, neigh_node);
                ret = NET_XMIT_SUCCESS;
        }

out:
        if (neigh_node)
                batadv_neigh_node_put(neigh_node);

        return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
                                  struct batadv_orig_node *orig_node)
{
        struct batadv_unicast_packet *unicast_packet;
        u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

        if (batadv_skb_head_push(skb, hdr_size) < 0)
                return false;

        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_packet->version = BATADV_COMPAT_VERSION;
        /* batman packet type: unicast */
        unicast_packet->packet_type = BATADV_UNICAST;
        /* set unicast ttl */
        unicast_packet->ttl = BATADV_TTL;
        /* copy the destination for faster routing */
        ether_addr_copy(unicast_packet->dest, orig_node->orig);
        /* set the destination tt version number */
        unicast_packet->ttvn = ttvn;

        return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
                                            struct batadv_orig_node *orig_node)
{
        size_t uni_size = sizeof(struct batadv_unicast_packet);

        return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb,
                                           struct batadv_orig_node *orig,
                                           int packet_subtype)
{
        struct batadv_hard_iface *primary_if;
        struct batadv_unicast_4addr_packet *uc_4addr_packet;
        bool ret = false;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        /* Push the header space and fill the unicast_packet substructure.
         * We can do that because the first member of the uc_4addr_packet
         * is of type struct batadv_unicast_packet.
         */
        if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
                                               orig))
                goto out;

        uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
        uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
        ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
        uc_4addr_packet->subtype = packet_subtype;
        uc_4addr_packet->reserved = 0;

        ret = true;
out:
        if (primary_if)
                batadv_hardif_put(primary_if);
        return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                            struct sk_buff *skb, int packet_type,
                            int packet_subtype,
                            struct batadv_orig_node *orig_node,
                            unsigned short vid)
{
        struct batadv_unicast_packet *unicast_packet;
        struct ethhdr *ethhdr;
        int ret = NET_XMIT_DROP;

        if (!orig_node)
                goto out;

        switch (packet_type) {
        case BATADV_UNICAST:
                if (!batadv_send_skb_prepare_unicast(skb, orig_node))
                        goto out;
                break;
        case BATADV_UNICAST_4ADDR:
                if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
                                                           orig_node,
                                                           packet_subtype))
                        goto out;
                break;
        default:
                /* this function supports UNICAST and UNICAST_4ADDR only. It
                 * should never be invoked with any other packet type
                 */
                goto out;
        }

        /* skb->data might have been reallocated by
         * batadv_send_skb_prepare_unicast{,_4addr}()
         */
        ethhdr = eth_hdr(skb);
        unicast_packet = (struct batadv_unicast_packet *)skb->data;

        /* inform the destination node that we are still missing a correct
         * route for this client. The destination will receive this packet and
         * will try to reroute it because the ttvn contained in the header is
         * less than the current one
         */
        if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
                unicast_packet->ttvn = unicast_packet->ttvn - 1;

        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_XMIT_SUCCESS;

out:
        if (orig_node)
                batadv_orig_node_put(orig_node);
        if (ret == NET_XMIT_DROP)
                kfree_skb(skb);
        return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb, int packet_type,
                                   int packet_subtype, u8 *dst_hint,
                                   unsigned short vid)
{
        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
        struct batadv_orig_node *orig_node;
        u8 *src, *dst;

        src = ethhdr->h_source;
        dst = ethhdr->h_dest;

        /* if we got a hint, send the packet to this client (if any) */
        if (dst_hint) {
                src = NULL;
                dst = dst_hint;
        }
        orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

        return batadv_send_skb_unicast(bat_priv, skb, packet_type,
                                       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
                           unsigned short vid)
{
        struct batadv_orig_node *orig_node;

        orig_node = batadv_gw_get_selected_orig(bat_priv);
        return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
                                       orig_node, vid);
}

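/**
 * batadv_schedule_bat_ogm - schedule the next OGM on an interface
 * @hard_iface: the interface to schedule the OGM for
 *
 * Ask the routing algorithm to schedule the next originator message (OGM)
 * on @hard_iface. Interfaces which are not in use or about to be removed are
 * skipped; an interface waiting for activation is marked active before the
 * OGM is scheduled.
 */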
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

        if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
            (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
                return;

        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
         * hardif_activate_interface() where the originator mac is set and
         * outdated packets (especially uninitialized mac addresses) in the
         * packet queue
         */
        if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
                hard_iface->if_status = BATADV_IF_ACTIVE;

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

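/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Free the skb carried by the forwarding packet, release the references held
 * on the incoming and outgoing hard interfaces (if any) and free the
 * forw_packet structure itself.
 */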
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
        kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_put(forw_packet->if_incoming);
        if (forw_packet->if_outgoing)
                batadv_hardif_put(forw_packet->if_outgoing);
        kfree(forw_packet);
}

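/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast forwarding packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the (re)broadcast to queue
 * @send_time: delay in jiffies before the packet is sent
 *
 * Add the forwarding packet to the broadcast queue and arm its delayed work
 * so that batadv_send_outstanding_bcast_packet() runs after @send_time
 * jiffies.
 */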
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
                                 unsigned long send_time)
{
        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up its timer. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "bcast packet queue full\n");
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;
        forw_packet->if_outgoing = NULL;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);

        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                batadv_hardif_put(primary_if);
        return NETDEV_TX_BUSY;
}

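/**
 * batadv_send_outstanding_bcast_packet - (re)send a queued broadcast packet
 * @work: work queue item of the forwarding packet
 *
 * Transmit a copy of the queued broadcast packet on each interface attached
 * to the same soft interface, unless the mesh is being deactivated or DAT
 * decided to drop the broadcast. Interfaces whose num_bcasts limit is already
 * reached are skipped. The packet is requeued with a small delay until it has
 * been scheduled BATADV_NUM_BCASTS_MAX times.
 */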
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batadv_hard_iface *hard_iface;
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                if (forw_packet->num_packets >= hard_iface->num_bcasts)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_broadcast_skb(skb1, hard_iface);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
        }

out:
        batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

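/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM forwarding packet
 * @work: work queue item of the forwarding packet
 *
 * Remove the forwarding packet from the OGM queue and hand it to the routing
 * algorithm for transmission. If this is the node's own OGM on its
 * originating interface, the next OGM round is scheduled before the packet
 * is freed.
 */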
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /* we have to have at least one packet in the queue to determine the
         * queue's wake up time unless we are shutting down.
         *
         * only re-schedule if this is the "original" copy, e.g. the OGM of the
         * primary interface should only be rescheduled once per period, but
         * this function will be called for the forw_packet instances of the
         * other secondary interfaces as well.
         */
        if (forw_packet->own &&
            forw_packet->if_incoming == forw_packet->if_outgoing)
                batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        batadv_forw_packet_free(forw_packet);
}

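/**
 * batadv_purge_outstanding_packets - cancel queued broadcast and OGM packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface to purge packets for, or NULL to purge all
 *
 * Walk the broadcast and OGM forwarding queues, cancel the pending delayed
 * work of every matching entry and free the entries whose work had not run
 * yet.
 */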
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *safe_tmp_node;
        bool pending;

        if (hard_iface)
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets(): %s\n",
                           hard_iface->net_dev->name);
        else
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface) &&
                    (forw_packet->if_outgoing != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /* batadv_send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        if (!forw_packet->own)
                                atomic_inc(&bat_priv->bcast_queue_left);

                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface) &&
                    (forw_packet->if_outgoing != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /* batadv_send_outstanding_bat_ogm_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        if (!forw_packet->own)
                                atomic_inc(&bat_priv->batman_queue_left);

                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}