linux/net/bluetooth/6lowpan.c
// SPDX-License-Identifier: GPL-2.0-only
/*
   Copyright (c) 2013-2014 Intel Corp.

*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/pkt_sched.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

struct skb_cb {
        struct in6_addr addr;
        struct in6_addr gw;
        struct l2cap_chan *chan;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
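/* struct skb_cb must fit inside the 48-byte skb->cb area: two struct
 * in6_addr (16 bytes each) plus a channel pointer leave room to spare.
 */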

/* The devices list contains the devices for which we are acting
 * as a proxy. A BT 6LoWPAN device is a virtual device that
 * connects to a Bluetooth LE device; the real connection to the
 * BT device is made via the l2cap layer. There is one virtual
 * device per BT 6LoWPAN network (= per hciX device).
 * The list contains struct lowpan_btle_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We listen for incoming connections on this channel
 */
static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);
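
/* Locking model: devices_lock protects writers of bt_6lowpan_devices and
 * of the per-device peer lists (readers use RCU), while set_lock
 * serializes every update of listen_chan.
 */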

struct lowpan_peer {
        struct list_head list;
        struct rcu_head rcu;
        struct l2cap_chan *chan;

        /* peer addresses in various formats */
        unsigned char lladdr[ETH_ALEN];
        struct in6_addr peer_addr;
};

struct lowpan_btle_dev {
        struct list_head list;

        struct hci_dev *hdev;
        struct net_device *netdev;
        struct list_head peers;
        atomic_t peer_count; /* number of items in peers list */

        struct work_struct delete_netdev;
        struct delayed_work notify_peers;
};

static inline struct lowpan_btle_dev *
lowpan_btle_dev(const struct net_device *netdev)
{
        return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
}

static inline void peer_add(struct lowpan_btle_dev *dev,
                            struct lowpan_peer *peer)
{
        list_add_rcu(&peer->list, &dev->peers);
        atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_btle_dev *dev,
                            struct lowpan_peer *peer)
{
        list_del_rcu(&peer->list);
        kfree_rcu(peer, rcu);

        module_put(THIS_MODULE);

        if (atomic_dec_and_test(&dev->peer_count)) {
                BT_DBG("last peer");
                return true;
        }

        return false;
}
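
/* Note the RCU pattern in peer_del() above: list_del_rcu() unlinks the
 * peer while readers may still be traversing the list, and kfree_rcu()
 * defers the actual free until all running RCU read-side critical
 * sections have completed.
 */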

static inline struct lowpan_peer *
__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
{
        struct lowpan_peer *peer;

        list_for_each_entry_rcu(peer, &dev->peers, list) {
                if (peer->chan == chan)
                        return peer;
        }

        return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
{
        struct lowpan_peer *peer;

        list_for_each_entry_rcu(peer, &dev->peers, list) {
                if (peer->chan->conn == conn)
                        return peer;
        }

        return NULL;
}

static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
                                                  struct in6_addr *daddr,
                                                  struct sk_buff *skb)
{
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
        int count = atomic_read(&dev->peer_count);
        const struct in6_addr *nexthop;
        struct lowpan_peer *peer;
        struct neighbour *neigh;

        BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

        if (!rt) {
                if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
                        /* There is neither a route nor a gateway, so
                         * the destination is probably a directly
                         * connected peer.
                         */
                        nexthop = daddr;
                } else {
                        /* There is a known gateway
                         */
                        nexthop = &lowpan_cb(skb)->gw;
                }
        } else {
                nexthop = rt6_nexthop(rt, daddr);

                /* We need to remember the address because it is needed
                 * by bt_xmit() when sending the packet. In bt_xmit(), the
                 * destination routing info is not set.
                 */
                memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
        }

        BT_DBG("gw %pI6c", nexthop);

        rcu_read_lock();

        list_for_each_entry_rcu(peer, &dev->peers, list) {
                BT_DBG("dst addr %pMR dst type %u ip %pI6c",
                       &peer->chan->dst, peer->chan->dst_type,
                       &peer->peer_addr);

                if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
                        rcu_read_unlock();
                        return peer;
                }
        }

        /* use the neighbour cache for matching addresses assigned by SLAAC */
        neigh = __ipv6_neigh_lookup(dev->netdev, nexthop);
        if (neigh) {
                list_for_each_entry_rcu(peer, &dev->peers, list) {
                        if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
                                neigh_release(neigh);
                                rcu_read_unlock();
                                return peer;
                        }
                }
                neigh_release(neigh);
        }

        rcu_read_unlock();

        return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                peer = __peer_lookup_conn(entry, conn);
                if (peer)
                        break;
        }

        rcu_read_unlock();

        return peer;
}

static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_btle_dev *dev = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                if (conn->hcon->hdev == entry->hdev) {
                        dev = entry;
                        break;
                }
        }

        rcu_read_unlock();

        return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
        struct sk_buff *skb_cp;

        skb_cp = skb_copy(skb, GFP_ATOMIC);
        if (!skb_cp)
                return NET_RX_DROP;

        return netif_rx_ni(skb_cp);
}

static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
                           struct lowpan_peer *peer)
{
        const u8 *saddr;

        saddr = peer->lladdr;

        return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
}
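
/* lowpan_header_decompress() rebuilds a full IPv6 header from the IPHC
 * encoding, using our own and the peer's link-layer addresses as context
 * to restore any address bits that the sender elided.
 */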

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                    struct lowpan_peer *peer)
{
        struct sk_buff *local_skb;
        int ret;

        if (!netif_running(dev))
                goto drop;

        if (dev->type != ARPHRD_6LOWPAN || !skb->len)
                goto drop;

        skb_reset_network_header(skb);

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                goto drop;

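        /* The first byte is a 6LoWPAN dispatch value: an uncompressed
         * IPv6 packet and an IPHC-compressed packet are handled below;
         * anything else is dropped.
         */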
        /* check that it's our buffer */
        if (lowpan_is_ipv6(*skb_network_header(skb))) {
                /* Pull off the 1-byte 6LoWPAN dispatch header. */
                skb_pull(skb, 1);

                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
                local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
                                            skb_tailroom(skb), GFP_ATOMIC);
                if (!local_skb)
                        goto drop;

                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
                local_skb->dev = dev;

                skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

                if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
                        kfree_skb(local_skb);
                        goto drop;
                }

                dev->stats.rx_bytes += skb->len;
                dev->stats.rx_packets++;

                consume_skb(local_skb);
                consume_skb(skb);
        } else if (lowpan_is_iphc(*skb_network_header(skb))) {
                local_skb = skb_clone(skb, GFP_ATOMIC);
                if (!local_skb)
                        goto drop;

                local_skb->dev = dev;

                ret = iphc_decompress(local_skb, dev, peer);
                if (ret < 0) {
                        BT_DBG("iphc_decompress failed: %d", ret);
                        kfree_skb(local_skb);
                        goto drop;
                }

                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;

                if (give_skb_to_upper(local_skb, dev)
                                != NET_RX_SUCCESS) {
                        kfree_skb(local_skb);
                        goto drop;
                }

                dev->stats.rx_bytes += skb->len;
                dev->stats.rx_packets++;

                consume_skb(local_skb);
                consume_skb(skb);
        } else {
                BT_DBG("unknown packet type");
                goto drop;
        }

        return NET_RX_SUCCESS;

drop:
        dev->stats.rx_dropped++;
        return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
        struct lowpan_btle_dev *dev;
        struct lowpan_peer *peer;
        int err;

        peer = lookup_peer(chan->conn);
        if (!peer)
                return -ENOENT;

        dev = lookup_dev(chan->conn);
        if (!dev || !dev->netdev)
                return -ENOENT;

        err = recv_pkt(skb, dev->netdev, peer);
        if (err) {
                BT_DBG("recv pkt %d", err);
                err = -EAGAIN;
        }

        return err;
}

static int setup_header(struct sk_buff *skb, struct net_device *netdev,
                        bdaddr_t *peer_addr, u8 *peer_addr_type)
{
        struct in6_addr ipv6_daddr;
        struct ipv6hdr *hdr;
        struct lowpan_btle_dev *dev;
        struct lowpan_peer *peer;
        u8 *daddr;
        int err, status = 0;

        hdr = ipv6_hdr(skb);

        dev = lowpan_btle_dev(netdev);

        memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

        if (ipv6_addr_is_multicast(&ipv6_daddr)) {
                lowpan_cb(skb)->chan = NULL;
                daddr = NULL;
        } else {
                BT_DBG("dest IP %pI6c", &ipv6_daddr);

                /* The packet might be sent to the 6lowpan interface
                 * because of routing (either via the default route or
                 * a user-set route), so look up the peer by the
                 * destination address.
                 */
                peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
                if (!peer) {
                        BT_DBG("no such peer");
                        return -ENOENT;
                }

                daddr = peer->lladdr;
                *peer_addr = peer->chan->dst;
                *peer_addr_type = peer->chan->dst_type;
                lowpan_cb(skb)->chan = peer->chan;

                status = 1;
        }

        lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

        err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
        if (err < 0)
                return err;

        return status;
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
                         unsigned short type, const void *_daddr,
                         const void *_saddr, unsigned int len)
{
        if (type != ETH_P_IPV6)
                return -EINVAL;

        return 0;
}
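
/* header_create() deliberately builds nothing: the 6LoWPAN header is
 * produced later in the xmit path by setup_header(), once the peer (and
 * therefore the destination link-layer address) is known.
 */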

/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
                    struct net_device *netdev)
{
        struct msghdr msg;
        struct kvec iv;
        int err;

        /* Remember the skb so that we can send EAGAIN to the caller if
         * we run out of credits.
         */
        chan->data = skb;

        iv.iov_base = skb->data;
        iv.iov_len = skb->len;

        memset(&msg, 0, sizeof(msg));
        iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);

        err = l2cap_chan_send(chan, &msg, skb->len);
        if (err > 0) {
                netdev->stats.tx_bytes += err;
                netdev->stats.tx_packets++;
                return 0;
        }

        if (err < 0)
                netdev->stats.tx_errors++;

        return err;
}

static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
        struct sk_buff *local_skb;
        struct lowpan_btle_dev *entry;
        int err = 0;

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                struct lowpan_peer *pentry;
                struct lowpan_btle_dev *dev;

                if (entry->netdev != netdev)
                        continue;

                dev = lowpan_btle_dev(entry->netdev);

                list_for_each_entry_rcu(pentry, &dev->peers, list) {
                        int ret;

                        local_skb = skb_clone(skb, GFP_ATOMIC);

                        BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
                               netdev->name,
                               &pentry->chan->dst, pentry->chan->dst_type,
                               &pentry->peer_addr, pentry->chan);
                        ret = send_pkt(pentry->chan, local_skb, netdev);
                        if (ret < 0)
                                err = ret;

                        kfree_skb(local_skb);
                }
        }

        rcu_read_unlock();

        return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        int err = 0;
        bdaddr_t addr;
        u8 addr_type;

        /* We must take a copy of the skb before we modify/replace the
         * IPv6 header, as the header could be used elsewhere.
         */
        skb = skb_unshare(skb, GFP_ATOMIC);
        if (!skb)
                return NET_XMIT_DROP;

        /* Return values from setup_header()
         *  <0 - error, packet is dropped
         *   0 - this is a multicast packet
         *   1 - this is a unicast packet
         */
        err = setup_header(skb, netdev, &addr, &addr_type);
        if (err < 0) {
                kfree_skb(skb);
                return NET_XMIT_DROP;
        }

        if (err) {
                if (lowpan_cb(skb)->chan) {
                        BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
                               netdev->name, &addr, addr_type,
                               &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
                        err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
                } else {
                        err = -ENOENT;
                }
        } else {
                /* We need to send the packet to every device behind this
                 * interface.
                 */
                err = send_mcast_pkt(skb, netdev);
        }

        dev_kfree_skb(skb);

        if (err)
                BT_DBG("ERROR: xmit failed (%d)", err);

        return err < 0 ? NET_XMIT_DROP : err;
}

static int bt_dev_init(struct net_device *dev)
{
        netdev_lockdep_set_classes(dev);

        return 0;
}

static const struct net_device_ops netdev_ops = {
        .ndo_init               = bt_dev_init,
        .ndo_start_xmit         = bt_xmit,
};

static const struct header_ops header_ops = {
        .create = header_create,
};

static void netdev_setup(struct net_device *dev)
{
        dev->hard_header_len    = 0;
        dev->needed_tailroom    = 0;
        dev->flags              = IFF_RUNNING | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
        dev->tx_queue_len       = DEFAULT_TX_QUEUE_LEN;

        dev->netdev_ops         = &netdev_ops;
        dev->header_ops         = &header_ops;
        dev->needs_free_netdev  = true;
}

static struct device_type bt_type = {
        .name   = "bluetooth",
};

static void ifup(struct net_device *netdev)
{
        int err;

        rtnl_lock();
        err = dev_open(netdev, NULL);
        if (err < 0)
                BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
        rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
        rtnl_lock();
        dev_close(netdev);
        rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
        struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
                                                   notify_peers.work);

        netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
        if (hcon->type != LE_LINK)
                return false;

        if (!enable_6lowpan)
                return false;

        return true;
}

static struct l2cap_chan *chan_create(void)
{
        struct l2cap_chan *chan;

        chan = l2cap_chan_create();
        if (!chan)
                return NULL;

        l2cap_chan_set_defaults(chan);

        chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
        chan->mode = L2CAP_MODE_LE_FLOWCTL;
        chan->imtu = 1280;

        return chan;
}
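
/* The 1280-byte imtu set in chan_create() is the IPv6 minimum link MTU
 * (RFC 8200); IPSP expects the L2CAP channel to carry at least that much
 * in a single SDU.
 */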

static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
                                        struct lowpan_btle_dev *dev,
                                        bool new_netdev)
{
        struct lowpan_peer *peer;

        peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
        if (!peer)
                return NULL;

        peer->chan = chan;
        memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

        baswap((void *)peer->lladdr, &chan->dst);

        lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);

        spin_lock(&devices_lock);
        INIT_LIST_HEAD(&peer->list);
        peer_add(dev, peer);
        spin_unlock(&devices_lock);

        /* Notifying peers about us needs to be done without locks held */
        if (new_netdev)
                INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
        schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

        return peer->chan;
}

static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
        struct net_device *netdev;
        bdaddr_t addr;
        int err;

        netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
                              IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
                              netdev_setup);
        if (!netdev)
                return -ENOMEM;

        netdev->addr_assign_type = NET_ADDR_PERM;
        baswap(&addr, &chan->src);
        __dev_addr_set(netdev, &addr, sizeof(addr));

        netdev->netdev_ops = &netdev_ops;
        SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);

        *dev = lowpan_btle_dev(netdev);
        (*dev)->netdev = netdev;
        (*dev)->hdev = chan->conn->hcon->hdev;
        INIT_LIST_HEAD(&(*dev)->peers);

        spin_lock(&devices_lock);
        INIT_LIST_HEAD(&(*dev)->list);
        list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
        spin_unlock(&devices_lock);

        err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
        if (err < 0) {
                BT_INFO("register_netdev failed %d", err);
                spin_lock(&devices_lock);
                list_del_rcu(&(*dev)->list);
                spin_unlock(&devices_lock);
                free_netdev(netdev);
                goto out;
        }

        BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
               netdev->ifindex, &chan->dst, chan->dst_type,
               &chan->src, chan->src_type);
        set_bit(__LINK_STATE_PRESENT, &netdev->state);

        return 0;

out:
        return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *dev;
        bool new_netdev = false;

        dev = lookup_dev(chan->conn);

        BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

        if (!dev) {
                if (setup_netdev(chan, &dev) < 0) {
                        l2cap_chan_del(chan, -ENOENT);
                        return;
                }
                new_netdev = true;
        }

        if (!try_module_get(THIS_MODULE))
                return;

        add_peer_chan(chan, dev, new_netdev);
        ifup(dev->netdev);
}

static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
        struct l2cap_chan *chan;

        chan = chan_create();
        if (!chan)
                return NULL;

        chan->ops = pchan->ops;

        BT_DBG("chan %p pchan %p", chan, pchan);

        return chan;
}

static void delete_netdev(struct work_struct *work)
{
        struct lowpan_btle_dev *entry = container_of(work,
                                                     struct lowpan_btle_dev,
                                                     delete_netdev);

        lowpan_unregister_netdev(entry->netdev);

        /* The entry pointer is deleted by the netdev destructor. */
}

static void chan_close_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_btle_dev *dev = NULL;
        struct lowpan_peer *peer;
        int err = -ENOENT;
        bool last = false, remove = true;

        BT_DBG("chan %p conn %p", chan, chan->conn);

        if (chan->conn && chan->conn->hcon) {
                if (!is_bt_6lowpan(chan->conn->hcon))
                        return;

                /* If conn is set, then the netdev is also there and we should
                 * not remove it.
                 */
                remove = false;
        }

        spin_lock(&devices_lock);

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                dev = lowpan_btle_dev(entry->netdev);
                peer = __peer_lookup_chan(dev, chan);
                if (peer) {
                        last = peer_del(dev, peer);
                        err = 0;

                        BT_DBG("dev %p removing %speer %p", dev,
                               last ? "last " : "1 ", peer);
                        BT_DBG("chan %p orig refcnt %u", chan,
                               kref_read(&chan->kref));

                        l2cap_chan_put(chan);
                        break;
                }
        }

        if (!err && last && dev && !atomic_read(&dev->peer_count)) {
                spin_unlock(&devices_lock);

                cancel_delayed_work_sync(&dev->notify_peers);

                ifdown(dev->netdev);

                if (remove) {
                        INIT_WORK(&entry->delete_netdev, delete_netdev);
                        schedule_work(&entry->delete_netdev);
                }
        } else {
                spin_unlock(&devices_lock);
        }
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
        BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
               state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
                                         unsigned long hdr_len,
                                         unsigned long len, int nb)
{
        /* Note that we must allocate using GFP_ATOMIC here, as this
         * function is ultimately called from the netdev hard xmit
         * function, i.e. in atomic context.
         */
        return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}

static void chan_suspend_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *dev;

        BT_DBG("chan %p suspend", chan);

        dev = lookup_dev(chan->conn);
        if (!dev || !dev->netdev)
                return;

        netif_stop_queue(dev->netdev);
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *dev;

        BT_DBG("chan %p resume", chan);

        dev = lookup_dev(chan->conn);
        if (!dev || !dev->netdev)
                return;

        netif_wake_queue(dev->netdev);
}

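/* The suspend/resume callbacks above tie L2CAP LE flow control to the
 * netdev queue: the queue is stopped when the channel runs out of
 * credits and woken once the remote peer grants more.
 */
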
static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
        return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
        .name                   = "L2CAP 6LoWPAN channel",
        .new_connection         = chan_new_conn_cb,
        .recv                   = chan_recv_cb,
        .close                  = chan_close_cb,
        .state_change           = chan_state_change_cb,
        .ready                  = chan_ready_cb,
        .resume                 = chan_resume_cb,
        .suspend                = chan_suspend_cb,
        .get_sndtimeo           = chan_get_sndtimeo_cb,
        .alloc_skb              = chan_alloc_skb_cb,

        .teardown               = l2cap_chan_no_teardown,
        .defer                  = l2cap_chan_no_defer,
        .set_shutdown           = l2cap_chan_no_set_shutdown,
};

static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
        struct l2cap_chan *chan;
        int err;

        chan = chan_create();
        if (!chan)
                return -EINVAL;

        chan->ops = &bt_6lowpan_chan_ops;

        err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
                                 addr, dst_type);

        BT_DBG("chan %p err %d", chan, err);
        if (err < 0)
                l2cap_chan_put(chan);

        return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
        struct lowpan_peer *peer;

        BT_DBG("conn %p dst type %u", conn, dst_type);

        peer = lookup_peer(conn);
        if (!peer)
                return -ENOENT;

        BT_DBG("peer %p chan %p", peer, peer->chan);

        l2cap_chan_close(peer->chan, ENOENT);

        return 0;
}

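/* Listen on the PSM assigned to IPSP (L2CAP_PSM_IPSP) so that remote LE
 * peers can initiate 6LoWPAN connections towards us.
 */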
static struct l2cap_chan *bt_6lowpan_listen(void)
{
        bdaddr_t *addr = BDADDR_ANY;
        struct l2cap_chan *chan;
        int err;

        if (!enable_6lowpan)
                return NULL;

        chan = chan_create();
        if (!chan)
                return NULL;

        chan->ops = &bt_6lowpan_chan_ops;
        chan->state = BT_LISTEN;
        chan->src_type = BDADDR_LE_PUBLIC;

        atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

        BT_DBG("chan %p src type %u", chan, chan->src_type);

        err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
        if (err) {
                l2cap_chan_put(chan);
                BT_ERR("psm cannot be added err %d", err);
                return NULL;
        }

        return chan;
}

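/* Parses "<bdaddr> <addr_type>" with the address in the usual big-endian
 * text form, e.g. (hypothetical peer) "00:1B:DC:00:11:22 1", where
 * 1 = BDADDR_LE_PUBLIC and 2 = BDADDR_LE_RANDOM.
 */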
static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
                          struct l2cap_conn **conn)
{
        struct hci_conn *hcon;
        struct hci_dev *hdev;
        int n;

        n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                   &addr->b[5], &addr->b[4], &addr->b[3],
                   &addr->b[2], &addr->b[1], &addr->b[0],
                   addr_type);

        if (n < 7)
                return -EINVAL;

        /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
        hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
        if (!hdev)
                return -ENOENT;

        hci_dev_lock(hdev);
        hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
        hci_dev_unlock(hdev);

        if (!hcon)
                return -ENOENT;

        *conn = (struct l2cap_conn *)hcon->l2cap_data;

        BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type);

        return 0;
}

static void disconnect_all_peers(void)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer, *tmp_peer, *new_peer;
        struct list_head peers;

        INIT_LIST_HEAD(&peers);

        /* We make a separate list of peers because close_cb() will
         * modify the device's peer list, and it is better not to
         * iterate and modify the same list at the same time.
         */

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                list_for_each_entry_rcu(peer, &entry->peers, list) {
                        new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
                        if (!new_peer)
                                break;

                        new_peer->chan = peer->chan;
                        INIT_LIST_HEAD(&new_peer->list);

                        list_add(&new_peer->list, &peers);
                }
        }

        rcu_read_unlock();

        spin_lock(&devices_lock);
        list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
                l2cap_chan_close(peer->chan, ENOENT);

                list_del_rcu(&peer->list);
                kfree_rcu(peer, rcu);
        }
        spin_unlock(&devices_lock);
}

struct set_enable {
        struct work_struct work;
        bool flag;
};

static void do_enable_set(struct work_struct *work)
{
        struct set_enable *set_enable = container_of(work,
                                                     struct set_enable, work);

        if (!set_enable->flag || enable_6lowpan != set_enable->flag)
                /* Disconnect existing connections if 6lowpan is
                 * disabled
                 */
                disconnect_all_peers();

        enable_6lowpan = set_enable->flag;

        mutex_lock(&set_lock);
        if (listen_chan) {
                l2cap_chan_close(listen_chan, 0);
                l2cap_chan_put(listen_chan);
        }

        listen_chan = bt_6lowpan_listen();
        mutex_unlock(&set_lock);

        kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
        struct set_enable *set_enable;

        set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
        if (!set_enable)
                return -ENOMEM;

        set_enable->flag = !!val;
        INIT_WORK(&set_enable->work, do_enable_set);

        schedule_work(&set_enable->work);

        return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
        *val = enable_6lowpan;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
                         lowpan_enable_set, "%llu\n");

static ssize_t lowpan_control_write(struct file *fp,
                                    const char __user *user_buffer,
                                    size_t count,
                                    loff_t *position)
{
        char buf[32];
        size_t buf_size = min(count, sizeof(buf) - 1);
        int ret;
        bdaddr_t addr;
        u8 addr_type;
        struct l2cap_conn *conn = NULL;

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (memcmp(buf, "connect ", 8) == 0) {
                ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
                if (ret == -EINVAL)
                        return ret;

                mutex_lock(&set_lock);
                if (listen_chan) {
                        l2cap_chan_close(listen_chan, 0);
                        l2cap_chan_put(listen_chan);
                        listen_chan = NULL;
                }
                mutex_unlock(&set_lock);

                if (conn) {
                        struct lowpan_peer *peer;

                        if (!is_bt_6lowpan(conn->hcon))
                                return -EINVAL;

                        peer = lookup_peer(conn);
                        if (peer) {
                                BT_DBG("6LoWPAN connection already exists");
                                return -EALREADY;
                        }

                        BT_DBG("conn %p dst %pMR type %d user %u", conn,
                               &conn->hcon->dst, conn->hcon->dst_type,
                               addr_type);
                }

                ret = bt_6lowpan_connect(&addr, addr_type);
                if (ret < 0)
                        return ret;

                return count;
        }

        if (memcmp(buf, "disconnect ", 11) == 0) {
                ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
                if (ret < 0)
                        return ret;

                ret = bt_6lowpan_disconnect(conn, addr_type);
                if (ret < 0)
                        return ret;

                return count;
        }

        return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer;

        spin_lock(&devices_lock);

        list_for_each_entry(entry, &bt_6lowpan_devices, list) {
                list_for_each_entry(peer, &entry->peers, list)
                        seq_printf(f, "%pMR (type %u)\n",
                                   &peer->chan->dst, peer->chan->dst_type);
        }

        spin_unlock(&devices_lock);

        return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
        return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
        .open           = lowpan_control_open,
        .read           = seq_read,
        .write          = lowpan_control_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
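
/* A minimal usage sketch for the two debugfs files created in
 * bt_6lowpan_init() below (paths assume debugfs is mounted at
 * /sys/kernel/debug; the peer address is made up):
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable
 *   echo "connect 00:1B:DC:00:11:22 1" > /sys/kernel/debug/bluetooth/6lowpan_control
 *   echo "disconnect 00:1B:DC:00:11:22 1" > /sys/kernel/debug/bluetooth/6lowpan_control
 *   cat /sys/kernel/debug/bluetooth/6lowpan_control
 */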

static void disconnect_devices(void)
{
        struct lowpan_btle_dev *entry, *tmp, *new_dev;
        struct list_head devices;

        INIT_LIST_HEAD(&devices);

        /* We make a separate list of devices because the unregister_netdev()
         * will call device_event() which will also want to modify the same
         * devices list.
         */

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
                if (!new_dev)
                        break;

                new_dev->netdev = entry->netdev;
                INIT_LIST_HEAD(&new_dev->list);

                list_add_rcu(&new_dev->list, &devices);
        }

        rcu_read_unlock();

        list_for_each_entry_safe(entry, tmp, &devices, list) {
                ifdown(entry->netdev);
                BT_DBG("Unregistering netdev %s %p",
                       entry->netdev->name, entry->netdev);
                lowpan_unregister_netdev(entry->netdev);
                kfree(entry);
        }
}

static int device_event(struct notifier_block *unused,
                        unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct lowpan_btle_dev *entry;

        if (netdev->type != ARPHRD_6LOWPAN)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UNREGISTER:
                spin_lock(&devices_lock);
                list_for_each_entry(entry, &bt_6lowpan_devices, list) {
                        if (entry->netdev == netdev) {
                                BT_DBG("Unregistered netdev %s %p",
                                       netdev->name, netdev);
                                list_del(&entry->list);
                                break;
                        }
                }
                spin_unlock(&devices_lock);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
        .notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
        lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
                                                           0644, bt_debugfs,
                                                           NULL,
                                                           &lowpan_enable_fops);
        lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
                                                     bt_debugfs, NULL,
                                                     &lowpan_control_fops);

        return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
        debugfs_remove(lowpan_enable_debugfs);
        debugfs_remove(lowpan_control_debugfs);

        if (listen_chan) {
                l2cap_chan_close(listen_chan, 0);
                l2cap_chan_put(listen_chan);
        }

        disconnect_devices();

        unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");