linux/net/batman-adv/send.c
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "network-coding.h"

#include <linux/if_ether.h>

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the batman interface to send the packet on
 * @dst_addr: the destination MAC address
 *
 * Sends out an already prepared packet to the given address via the
 * specified batman interface.
 *
 * Returns the result of dev_queue_xmit(), or NET_XMIT_DROP if the packet
 * could not be sent.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const uint8_t *dst_addr)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* Save a clone of the skb to use when decoding coded packets */
        batadv_nc_skb_store_for_decoding(bat_priv, skb);

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns true on success; false otherwise.
 */
bool batadv_send_skb_to_orig(struct sk_buff *skb,
                             struct batadv_orig_node *orig_node,
                             struct batadv_hard_iface *recv_if)
{
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;

        /* batadv_find_router() increases neigh_node's refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
                return false;

        /* route it */
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

        batadv_neigh_node_free_ref(neigh_node);

        return true;
}

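/**
 * batadv_schedule_bat_ogm - schedule an OGM transmission
 * @hard_iface: the interface to send the OGM on
 *
 * Hands the interface to the routing algorithm's OGM scheduler
 * (bat_ogm_schedule). Interfaces that are not in use or about to be removed
 * are ignored; interfaces still marked BATADV_IF_TO_BE_ACTIVATED are
 * activated first to keep outdated packets out of the queue.
 */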
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

        if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
            (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
                return;

        /* the interface gets activated here to avoid a race between the
         * moment the interface is activated in hardif_activate_interface(),
         * where the originator mac is set, and outdated packets (especially
         * ones with uninitialized mac addresses) already sitting in the
         * packet queue
         */
        if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
                hard_iface->if_status = BATADV_IF_ACTIVE;

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

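/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the skb (if any), drops the reference on the incoming hard
 * interface (if any) and releases the forw_packet structure itself.
 */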
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

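/**
 * _batadv_add_bcast_packet_to_list - queue a prepared broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: relative time (in jiffies) after which the packet is sent
 *
 * Adds forw_packet to the broadcast queue and arms its delayed work so the
 * packet is transmitted once send_time has elapsed.
 */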
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
                                 unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue a broadcast packet for repeated
 *  transmission
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the broadcast packet to add
 * @delay: number of jiffies to wait before sending the first copy
 *
 * Adds a broadcast packet to the queue and sets up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "bcast packet queue full\n");
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->header.ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);

        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}

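/**
 * batadv_send_outstanding_bcast_packet - rebroadcast a queued packet
 * @work: the delayed work embedded in the forwarding packet
 *
 * Work callback: removes the forwarding packet from the broadcast queue and
 * sends a clone of it on every hard interface attached to its soft
 * interface. The packet requeues itself with a 5 ms delay until it has been
 * broadcast three times, unless the mesh is deactivating or the DAT layer
 * decides to drop it.
 */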
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batadv_hard_iface *hard_iface;
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;

        delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_skb_packet(skb1, hard_iface,
                                               batadv_broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
        }

out:
        batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

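/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM
 * @work: the delayed work embedded in the forwarding packet
 *
 * Work callback: removes the forwarding packet from the OGM queue and hands
 * it to the routing algorithm's bat_ogm_emit(). For this host's own OGMs the
 * next transmission is scheduled right away, so the queue never runs empty
 * while the mesh is active.
 */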
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;

        delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /* we have to have at least one packet in the queue to determine the
         * queue's wake-up time unless we are shutting down
         */
        if (forw_packet->own)
                batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        batadv_forw_packet_free(forw_packet);
}

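/**
 * batadv_purge_outstanding_packets - cancel queued transmissions
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface to purge, or NULL to purge all interfaces
 *
 * Walks the broadcast and OGM queues, cancels the pending delayed work of
 * every matching forwarding packet and frees the packets whose work had not
 * started running yet; packets whose callback is already executing free
 * themselves when the callback finishes.
 */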
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *safe_tmp_node;
        bool pending;

        if (hard_iface)
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets(): %s\n",
                           hard_iface->net_dev->name);
        else
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /* batadv_send_outstanding_bcast_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /* batadv_send_outstanding_bat_ogm_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}