linux/net/x25/x25_forward.c
/*
 *      This module:
 *              This module is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      History
 *      03-01-2007      Added forwarding for x.25       Andrew Hendry
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

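/*
 * Each forwarded call is tracked by a struct x25_forward (declared in
 * <net/x25.h>): roughly a list node, the forwarded lci, the pair of
 * net_devices involved (dev1/dev2) and a reference count.
 */
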
LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

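/*
 * x25_forward_call - forward an incoming X.25 call request
 *
 * Looks up a route for dest_addr, refuses to forward the call back out of
 * the interface it arrived on, records the lci/device pair on
 * x25_forward_list and transmits a clone of the call request to the
 * neighbour on the outgoing device.
 *
 * Returns 1 if the call was forwarded, 0 or a negative errno otherwise.
 */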
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
                        struct sk_buff *skb, int lci)
{
        struct x25_route *rt;
        struct x25_neigh *neigh_new = NULL;
        struct list_head *entry;
        struct x25_forward *x25_frwd, *new_frwd;
        struct sk_buff *skbn;
        short same_lci = 0;
        int rc = 0;

        if ((rt = x25_get_route(dest_addr)) == NULL)
                goto out_no_route;

        if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
                /* This shouldn't happen; if it somehow occurs,
                 * do something sensible.
                 */
                goto out_put_route;
        }

        /* Avoid a loop. This is the normal exit path for a
         * system with only one x.25 iface and default route.
         */
        if (rt->dev == from->dev)
                goto out_put_nb;

        /* Remote end sending a call request on an already
         * established LCI? It shouldn't happen, but just in case...
         */
        read_lock_bh(&x25_forward_list_lock);
        list_for_each(entry, &x25_forward_list) {
                x25_frwd = list_entry(entry, struct x25_forward, node);
                if (x25_frwd->lci == lci) {
                        pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
                        same_lci = 1;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        /* Save the forwarding details for future traffic */
        if (!same_lci) {
                if ((new_frwd = kmalloc(sizeof(struct x25_forward),
                                                GFP_ATOMIC)) == NULL) {
                        rc = -ENOMEM;
                        goto out_put_nb;
                }
                new_frwd->lci = lci;
                new_frwd->dev1 = rt->dev;
                new_frwd->dev2 = from->dev;
                write_lock_bh(&x25_forward_list_lock);
                list_add(&new_frwd->node, &x25_forward_list);
                write_unlock_bh(&x25_forward_list_lock);
        }

        /* Forward the call request */
        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
                goto out_put_nb;
        x25_transmit_link(skbn, neigh_new);
        rc = 1;

out_put_nb:
        x25_neigh_put(neigh_new);

out_put_route:
        x25_route_put(rt);

out_no_route:
        return rc;
}

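/*
 * x25_forward_data - relay a frame on an already forwarded call
 *
 * Looks up the forwarding entry registered for lci, picks whichever of the
 * two recorded devices the frame did not arrive on, and transmits a copy of
 * the frame to that device's neighbour.
 *
 * Returns 1 if a copy was forwarded, 0 otherwise.
 */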
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
        struct x25_forward *frwd;
        struct list_head *entry;
        struct net_device *peer = NULL;
        struct x25_neigh *nb;
        struct sk_buff *skbn;
        int rc = 0;

        read_lock_bh(&x25_forward_list_lock);
        list_for_each(entry, &x25_forward_list) {
                frwd = list_entry(entry, struct x25_forward, node);
                if (frwd->lci == lci) {
                        /* The call is established, either side can send */
                        if (from->dev == frwd->dev1)
                                peer = frwd->dev2;
                        else
                                peer = frwd->dev1;
                        break;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        if ((nb = x25_get_neigh(peer)) == NULL)
                goto out;

        if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL)
                goto output;

        x25_transmit_link(skbn, nb);

        rc = 1;
output:
        x25_neigh_put(nb);
out:
        return rc;
}

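/*
 * x25_clear_forward_by_lci - remove the forwarding entries for a cleared call
 *
 * Deletes and frees every x25_forward_list entry registered under lci.
 */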
void x25_clear_forward_by_lci(unsigned int lci)
{
        struct x25_forward *fwd;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_safe(entry, tmp, &x25_forward_list) {
                fwd = list_entry(entry, struct x25_forward, node);
                if (fwd->lci == lci) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}

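/*
 * x25_clear_forward_by_dev - remove all forwarding entries using a device
 *
 * Deletes and frees every x25_forward_list entry that references dev on
 * either side of a forwarded call, e.g. when the interface goes away.
 */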
void x25_clear_forward_by_dev(struct net_device *dev)
{
        struct x25_forward *fwd;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_safe(entry, tmp, &x25_forward_list) {
                fwd = list_entry(entry, struct x25_forward, node);
                if ((fwd->dev1 == dev) || (fwd->dev2 == dev)) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}
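
/*
 * Hypothetical usage sketch (not part of this file): a receive path could
 * attempt forwarding before processing a frame locally; if
 * x25_forward_data() returns 1, a copy has already gone out on the peer
 * link, so the caller can simply free the original skb, e.g.:
 *
 *      if (x25_forward_data(lci, nb, skb)) {
 *              kfree_skb(skb);
 *              return 0;
 *      }
 */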