linux/net/bluetooth/6lowpan.c
// SPDX-License-Identifier: GPL-2.0-only
/*
   Copyright (c) 2013-2014 Intel Corp.

*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/pkt_sched.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

struct skb_cb {
        struct in6_addr addr;
        struct in6_addr gw;
        struct l2cap_chan *chan;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
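/* Note: struct skb_cb lives in the skb control buffer (skb->cb), which is
 * 48 bytes; two struct in6_addr (16 bytes each) plus a channel pointer fit
 * within that limit.
 */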

/* The devices list contains those devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the L2CAP layer. There is one virtual
 * device per BT 6LoWPAN network (= one per hciX device).
 * The list contains struct lowpan_btle_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We listen for incoming connections via this channel. */
static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);
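/* Note: set_lock serializes all updates to listen_chan, which otherwise
 * race between the do_enable_set() work item and writers of the debugfs
 * control file (see lowpan_control_write()).
 */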

struct lowpan_peer {
        struct list_head list;
        struct rcu_head rcu;
        struct l2cap_chan *chan;

        /* peer addresses in various formats */
        unsigned char lladdr[ETH_ALEN];
        struct in6_addr peer_addr;
};

struct lowpan_btle_dev {
        struct list_head list;

        struct hci_dev *hdev;
        struct net_device *netdev;
        struct list_head peers;
        atomic_t peer_count; /* number of items in peers list */

        struct work_struct delete_netdev;
        struct delayed_work notify_peers;
};

static inline struct lowpan_btle_dev *
lowpan_btle_dev(const struct net_device *netdev)
{
        return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
}

static inline void peer_add(struct lowpan_btle_dev *dev,
                            struct lowpan_peer *peer)
{
        list_add_rcu(&peer->list, &dev->peers);
        atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_btle_dev *dev,
                            struct lowpan_peer *peer)
{
        list_del_rcu(&peer->list);
        kfree_rcu(peer, rcu);

        module_put(THIS_MODULE);

        if (atomic_dec_and_test(&dev->peer_count)) {
                BT_DBG("last peer");
                return true;
        }

        return false;
}

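/* Note: the lookup helpers below walk RCU-protected lists; callers must
 * hold rcu_read_lock() (or devices_lock on the update side) for the
 * returned peer to remain valid.
 */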
static inline struct lowpan_peer *
__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
{
        struct lowpan_peer *peer;

        list_for_each_entry_rcu(peer, &dev->peers, list) {
                if (peer->chan == chan)
                        return peer;
        }

        return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
{
        struct lowpan_peer *peer;

        list_for_each_entry_rcu(peer, &dev->peers, list) {
                if (peer->chan->conn == conn)
                        return peer;
        }

        return NULL;
}

static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
                                                  struct in6_addr *daddr,
                                                  struct sk_buff *skb)
{
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
        int count = atomic_read(&dev->peer_count);
        const struct in6_addr *nexthop;
        struct lowpan_peer *peer;
        struct neighbour *neigh;

        BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

        if (!rt) {
                if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
                        /* There is neither a route nor a gateway,
                         * so the destination is probably a direct peer.
                         */
                        nexthop = daddr;
                } else {
                        /* There is a known gateway. */
                        nexthop = &lowpan_cb(skb)->gw;
                }
        } else {
                nexthop = rt6_nexthop(rt, daddr);

                /* We need to remember the address because it is needed
                 * by bt_xmit() when sending the packet. In bt_xmit(), the
                 * destination routing info is not set.
                 */
                memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
        }

        BT_DBG("gw %pI6c", nexthop);

        rcu_read_lock();

        list_for_each_entry_rcu(peer, &dev->peers, list) {
                BT_DBG("dst addr %pMR dst type %u ip %pI6c",
                       &peer->chan->dst, peer->chan->dst_type,
                       &peer->peer_addr);

                if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
                        rcu_read_unlock();
                        return peer;
                }
        }

        /* use the neighbour cache for matching addresses assigned by SLAAC */
        neigh = __ipv6_neigh_lookup(dev->netdev, nexthop);
        if (neigh) {
                list_for_each_entry_rcu(peer, &dev->peers, list) {
                        if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
                                neigh_release(neigh);
                                rcu_read_unlock();
                                return peer;
                        }
                }
                neigh_release(neigh);
        }

        rcu_read_unlock();

        return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                peer = __peer_lookup_conn(entry, conn);
                if (peer)
                        break;
        }

        rcu_read_unlock();

        return peer;
}

static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_btle_dev *dev = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                if (conn->hcon->hdev == entry->hdev) {
                        dev = entry;
                        break;
                }
        }

        rcu_read_unlock();

        return dev;
}

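/* Note: the skb is copied before being handed to the stack because the
 * caller still owns the original buffer and reads its length afterwards
 * for the rx statistics.
 */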
static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
        struct sk_buff *skb_cp;

        skb_cp = skb_copy(skb, GFP_ATOMIC);
        if (!skb_cp)
                return NET_RX_DROP;

        return netif_rx_ni(skb_cp);
}

static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
                           struct lowpan_peer *peer)
{
        const u8 *saddr;

        saddr = peer->lladdr;

        return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
}

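/* Note: incoming frames carry a 6LoWPAN dispatch byte (RFC 4944/6282):
 * either an uncompressed IPv6 header (dispatch 0x41) or an IPHC-compressed
 * one, which is what the two branches below distinguish.
 */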
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                    struct lowpan_peer *peer)
{
        struct sk_buff *local_skb;
        int ret;

        if (!netif_running(dev))
                goto drop;

        if (dev->type != ARPHRD_6LOWPAN || !skb->len)
                goto drop;

        skb_reset_network_header(skb);

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                goto drop;

        /* check that it's our buffer */
        if (lowpan_is_ipv6(*skb_network_header(skb))) {
                /* Pull off the 1-byte 6LoWPAN dispatch header. */
                skb_pull(skb, 1);

                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
                local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
                                            skb_tailroom(skb), GFP_ATOMIC);
                if (!local_skb)
                        goto drop;

                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
                local_skb->dev = dev;

                skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

                if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
                        kfree_skb(local_skb);
                        goto drop;
                }

                dev->stats.rx_bytes += skb->len;
                dev->stats.rx_packets++;

                consume_skb(local_skb);
                consume_skb(skb);
        } else if (lowpan_is_iphc(*skb_network_header(skb))) {
                local_skb = skb_clone(skb, GFP_ATOMIC);
                if (!local_skb)
                        goto drop;

                local_skb->dev = dev;

                ret = iphc_decompress(local_skb, dev, peer);
                if (ret < 0) {
                        BT_DBG("iphc_decompress failed: %d", ret);
                        kfree_skb(local_skb);
                        goto drop;
                }

                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;

                if (give_skb_to_upper(local_skb, dev)
                                != NET_RX_SUCCESS) {
                        kfree_skb(local_skb);
                        goto drop;
                }

                dev->stats.rx_bytes += skb->len;
                dev->stats.rx_packets++;

                consume_skb(local_skb);
                consume_skb(skb);
        } else {
                BT_DBG("unknown packet type");
                goto drop;
        }

        return NET_RX_SUCCESS;

drop:
        dev->stats.rx_dropped++;
        return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
        struct lowpan_btle_dev *dev;
        struct lowpan_peer *peer;
        int err;

        peer = lookup_peer(chan->conn);
        if (!peer)
                return -ENOENT;

        dev = lookup_dev(chan->conn);
        if (!dev || !dev->netdev)
                return -ENOENT;

        err = recv_pkt(skb, dev->netdev, peer);
        if (err) {
                BT_DBG("recv pkt %d", err);
                err = -EAGAIN;
        }

        return err;
}

static int setup_header(struct sk_buff *skb, struct net_device *netdev,
                        bdaddr_t *peer_addr, u8 *peer_addr_type)
{
        struct in6_addr ipv6_daddr;
        struct ipv6hdr *hdr;
        struct lowpan_btle_dev *dev;
        struct lowpan_peer *peer;
        u8 *daddr;
        int err, status = 0;

        hdr = ipv6_hdr(skb);

        dev = lowpan_btle_dev(netdev);

        memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

        if (ipv6_addr_is_multicast(&ipv6_daddr)) {
                lowpan_cb(skb)->chan = NULL;
                daddr = NULL;
        } else {
                BT_DBG("dest IP %pI6c", &ipv6_daddr);

                /* The packet might have been routed to the 6lowpan
                 * interface (either via the default route or a
                 * user-set route), so look up the peer by the
                 * destination address.
                 */
                peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
                if (!peer) {
                        BT_DBG("no such peer");
                        return -ENOENT;
                }

                daddr = peer->lladdr;
                *peer_addr = peer->chan->dst;
                *peer_addr_type = peer->chan->dst_type;
                lowpan_cb(skb)->chan = peer->chan;

                status = 1;
        }

        lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

        err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
        if (err < 0)
                return err;

        return status;
}

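/* Note: header_create() is our header_ops->create hook. It only rejects
 * non-IPv6 traffic here; the actual 6LoWPAN header is built later, in
 * setup_header() on the transmit path, once the destination peer is known.
 */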
static int header_create(struct sk_buff *skb, struct net_device *netdev,
                         unsigned short type, const void *_daddr,
                         const void *_saddr, unsigned int len)
{
        if (type != ETH_P_IPV6)
                return -EINVAL;

        return 0;
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
                    struct net_device *netdev)
{
        struct msghdr msg;
        struct kvec iv;
        int err;

        /* Remember the skb so that we can send EAGAIN to the caller if
         * we run out of credits.
         */
        chan->data = skb;

        iv.iov_base = skb->data;
        iv.iov_len = skb->len;

        memset(&msg, 0, sizeof(msg));
        iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);

        err = l2cap_chan_send(chan, &msg, skb->len);
        if (err > 0) {
                netdev->stats.tx_bytes += err;
                netdev->stats.tx_packets++;
                return 0;
        }

        if (err < 0)
                netdev->stats.tx_errors++;

        return err;
}

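/* Note: BT LE has no link-layer multicast, so multicast packets are
 * emulated by unicasting a clone of the skb to every connected peer.
 */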
static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
        struct sk_buff *local_skb;
        struct lowpan_btle_dev *entry;
        int err = 0;

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                struct lowpan_peer *pentry;
                struct lowpan_btle_dev *dev;

                if (entry->netdev != netdev)
                        continue;

                dev = lowpan_btle_dev(entry->netdev);

                list_for_each_entry_rcu(pentry, &dev->peers, list) {
                        int ret;

                        local_skb = skb_clone(skb, GFP_ATOMIC);
                        if (!local_skb) {
                                err = -ENOMEM;
                                continue;
                        }

                        BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
                               netdev->name,
                               &pentry->chan->dst, pentry->chan->dst_type,
                               &pentry->peer_addr, pentry->chan);
                        ret = send_pkt(pentry->chan, local_skb, netdev);
                        if (ret < 0)
                                err = ret;

                        kfree_skb(local_skb);
                }
        }

        rcu_read_unlock();

        return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        int err = 0;
        bdaddr_t addr;
        u8 addr_type;

        /* We must take a copy of the skb before we modify/replace the
         * ipv6 header, as the header could be used elsewhere.
         */
        skb = skb_unshare(skb, GFP_ATOMIC);
        if (!skb)
                return NET_XMIT_DROP;

        /* Return values from setup_header()
         *  <0 - error, packet is dropped
         *   0 - this is a multicast packet
         *   1 - this is a unicast packet
         */
        err = setup_header(skb, netdev, &addr, &addr_type);
        if (err < 0) {
                kfree_skb(skb);
                return NET_XMIT_DROP;
        }

        if (err) {
                if (lowpan_cb(skb)->chan) {
                        BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
                               netdev->name, &addr, addr_type,
                               &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
                        err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
                } else {
                        err = -ENOENT;
                }
        } else {
                /* We need to send the packet to every device behind this
                 * interface.
                 */
                err = send_mcast_pkt(skb, netdev);
        }

        dev_kfree_skb(skb);

        if (err)
                BT_DBG("ERROR: xmit failed (%d)", err);

        return err < 0 ? NET_XMIT_DROP : err;
}

static int bt_dev_init(struct net_device *dev)
{
        netdev_lockdep_set_classes(dev);

        return 0;
}

static const struct net_device_ops netdev_ops = {
        .ndo_init               = bt_dev_init,
        .ndo_start_xmit         = bt_xmit,
};

static const struct header_ops header_ops = {
        .create = header_create,
};

static void netdev_setup(struct net_device *dev)
{
        dev->hard_header_len    = 0;
        dev->needed_tailroom    = 0;
        dev->flags              = IFF_RUNNING | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
        dev->tx_queue_len       = DEFAULT_TX_QUEUE_LEN;

        dev->netdev_ops         = &netdev_ops;
        dev->header_ops         = &header_ops;
        dev->needs_free_netdev  = true;
}

static struct device_type bt_type = {
        .name   = "bluetooth",
};

static void ifup(struct net_device *netdev)
{
        int err;

        rtnl_lock();
        err = dev_open(netdev, NULL);
        if (err < 0)
                BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
        rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
        rtnl_lock();
        dev_close(netdev);
        rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
        struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
                                                   notify_peers.work);

        netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
        if (hcon->type != LE_LINK)
                return false;

        if (!enable_6lowpan)
                return false;

        return true;
}

static struct l2cap_chan *chan_create(void)
{
        struct l2cap_chan *chan;

        chan = l2cap_chan_create();
        if (!chan)
                return NULL;

        l2cap_chan_set_defaults(chan);

        chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
        chan->mode = L2CAP_MODE_LE_FLOWCTL;
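        /* 1280 octets is the IPv6 minimum link MTU (RFC 8200), which the
         * IPSP expects the L2CAP channel to be able to carry.
         */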
        chan->imtu = 1280;

        return chan;
}

static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
                                        struct lowpan_btle_dev *dev,
                                        bool new_netdev)
{
        struct lowpan_peer *peer;

        peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
        if (!peer)
                return NULL;

        peer->chan = chan;
        memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

        baswap((void *)peer->lladdr, &chan->dst);

        lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);

        spin_lock(&devices_lock);
        INIT_LIST_HEAD(&peer->list);
        peer_add(dev, peer);
        spin_unlock(&devices_lock);

        /* Notifying peers about us needs to be done without locks held */
        if (new_netdev)
                INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
        schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

        return peer->chan;
}

static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
        struct net_device *netdev;
        int err;

        netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
                              IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
                              netdev_setup);
        if (!netdev)
                return -ENOMEM;

        netdev->addr_assign_type = NET_ADDR_PERM;
        baswap((void *)netdev->dev_addr, &chan->src);

        netdev->netdev_ops = &netdev_ops;
        SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);

        *dev = lowpan_btle_dev(netdev);
        (*dev)->netdev = netdev;
        (*dev)->hdev = chan->conn->hcon->hdev;
        INIT_LIST_HEAD(&(*dev)->peers);

        spin_lock(&devices_lock);
        INIT_LIST_HEAD(&(*dev)->list);
        list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
        spin_unlock(&devices_lock);

        err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
        if (err < 0) {
                BT_INFO("register_netdev failed %d", err);
                spin_lock(&devices_lock);
                list_del_rcu(&(*dev)->list);
                spin_unlock(&devices_lock);
                free_netdev(netdev);
                goto out;
        }

        BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
               netdev->ifindex, &chan->dst, chan->dst_type,
               &chan->src, chan->src_type);
        set_bit(__LINK_STATE_PRESENT, &netdev->state);

        return 0;

out:
        return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *dev;
        bool new_netdev = false;

        dev = lookup_dev(chan->conn);

        BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

        if (!dev) {
                if (setup_netdev(chan, &dev) < 0) {
                        l2cap_chan_del(chan, -ENOENT);
                        return;
                }
                new_netdev = true;
        }

        if (!try_module_get(THIS_MODULE))
                return;

        add_peer_chan(chan, dev, new_netdev);
        ifup(dev->netdev);
}

static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
        struct l2cap_chan *chan;

        chan = chan_create();
        if (!chan)
                return NULL;

        chan->ops = pchan->ops;

        BT_DBG("chan %p pchan %p", chan, pchan);

        return chan;
}

static void delete_netdev(struct work_struct *work)
{
        struct lowpan_btle_dev *entry = container_of(work,
                                                     struct lowpan_btle_dev,
                                                     delete_netdev);

        lowpan_unregister_netdev(entry->netdev);

        /* The entry pointer is deleted by the netdev destructor. */
}

static void chan_close_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_btle_dev *dev = NULL;
        struct lowpan_peer *peer;
        int err = -ENOENT;
        bool last = false, remove = true;

        BT_DBG("chan %p conn %p", chan, chan->conn);

        if (chan->conn && chan->conn->hcon) {
                if (!is_bt_6lowpan(chan->conn->hcon))
                        return;

                /* If conn is set, then the netdev is also there and we
                 * should not remove it.
                 */
                remove = false;
        }

        spin_lock(&devices_lock);

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                dev = lowpan_btle_dev(entry->netdev);
                peer = __peer_lookup_chan(dev, chan);
                if (peer) {
                        last = peer_del(dev, peer);
                        err = 0;

                        BT_DBG("dev %p removing %speer %p", dev,
                               last ? "last " : "1 ", peer);
                        BT_DBG("chan %p orig refcnt %u", chan,
                               kref_read(&chan->kref));

                        l2cap_chan_put(chan);
                        break;
                }
        }

        if (!err && last && dev && !atomic_read(&dev->peer_count)) {
                spin_unlock(&devices_lock);

                cancel_delayed_work_sync(&dev->notify_peers);

                ifdown(dev->netdev);

                if (remove) {
                        INIT_WORK(&entry->delete_netdev, delete_netdev);
                        schedule_work(&entry->delete_netdev);
                }
        } else {
                spin_unlock(&devices_lock);
        }
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
        BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
               state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
                                         unsigned long hdr_len,
                                         unsigned long len, int nb)
{
        /* Note that we must allocate using GFP_ATOMIC here as
         * this function is called originally from netdev hard xmit
         * function in atomic context.
         */
        return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}

static void chan_suspend_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *dev;

        BT_DBG("chan %p suspend", chan);

        dev = lookup_dev(chan->conn);
        if (!dev || !dev->netdev)
                return;

        netif_stop_queue(dev->netdev);
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
        struct lowpan_btle_dev *dev;

        BT_DBG("chan %p resume", chan);

        dev = lookup_dev(chan->conn);
        if (!dev || !dev->netdev)
                return;

        netif_wake_queue(dev->netdev);
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
        return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
        .name                   = "L2CAP 6LoWPAN channel",
        .new_connection         = chan_new_conn_cb,
        .recv                   = chan_recv_cb,
        .close                  = chan_close_cb,
        .state_change           = chan_state_change_cb,
        .ready                  = chan_ready_cb,
        .resume                 = chan_resume_cb,
        .suspend                = chan_suspend_cb,
        .get_sndtimeo           = chan_get_sndtimeo_cb,
        .alloc_skb              = chan_alloc_skb_cb,

        .teardown               = l2cap_chan_no_teardown,
        .defer                  = l2cap_chan_no_defer,
        .set_shutdown           = l2cap_chan_no_set_shutdown,
};

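/* Note: connections use L2CAP_PSM_IPSP, the fixed PSM (0x0023) assigned to
 * the Internet Protocol Support Profile.
 */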
static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
        struct l2cap_chan *chan;
        int err;

        chan = chan_create();
        if (!chan)
                return -EINVAL;

        chan->ops = &bt_6lowpan_chan_ops;

        err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
                                 addr, dst_type);

        BT_DBG("chan %p err %d", chan, err);
        if (err < 0)
                l2cap_chan_put(chan);

        return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
        struct lowpan_peer *peer;

        BT_DBG("conn %p dst type %u", conn, dst_type);

        peer = lookup_peer(conn);
        if (!peer)
                return -ENOENT;

        BT_DBG("peer %p chan %p", peer, peer->chan);

        l2cap_chan_close(peer->chan, ENOENT);

        return 0;
}

static struct l2cap_chan *bt_6lowpan_listen(void)
{
        bdaddr_t *addr = BDADDR_ANY;
        struct l2cap_chan *chan;
        int err;

        if (!enable_6lowpan)
                return NULL;

        chan = chan_create();
        if (!chan)
                return NULL;

        chan->ops = &bt_6lowpan_chan_ops;
        chan->state = BT_LISTEN;
        chan->src_type = BDADDR_LE_PUBLIC;

        atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

        BT_DBG("chan %p src type %u", chan, chan->src_type);

        err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
        if (err) {
                l2cap_chan_put(chan);
                BT_ERR("psm cannot be added err %d", err);
                return NULL;
        }

        return chan;
}

static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
                          struct l2cap_conn **conn)
{
        struct hci_conn *hcon;
        struct hci_dev *hdev;
        int n;

        n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                   &addr->b[5], &addr->b[4], &addr->b[3],
                   &addr->b[2], &addr->b[1], &addr->b[0],
                   addr_type);

        if (n < 7)
                return -EINVAL;

        /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
        hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
        if (!hdev)
                return -ENOENT;

        hci_dev_lock(hdev);
        hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
        hci_dev_unlock(hdev);

        if (!hcon)
                return -ENOENT;

        *conn = (struct l2cap_conn *)hcon->l2cap_data;

        BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type);

        return 0;
}

static void disconnect_all_peers(void)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer, *tmp_peer, *new_peer;
        struct list_head peers;

        INIT_LIST_HEAD(&peers);

        /* We make a separate list of peers because close_cb() modifies
         * the device's peer list, and it is better not to walk and
         * modify the same list at the same time.
         */

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                list_for_each_entry_rcu(peer, &entry->peers, list) {
                        new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
                        if (!new_peer)
                                break;

                        new_peer->chan = peer->chan;
                        INIT_LIST_HEAD(&new_peer->list);

                        list_add(&new_peer->list, &peers);
                }
        }

        rcu_read_unlock();

        spin_lock(&devices_lock);
        list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
                l2cap_chan_close(peer->chan, ENOENT);

                list_del_rcu(&peer->list);
                kfree_rcu(peer, rcu);
        }
        spin_unlock(&devices_lock);
}

struct set_enable {
        struct work_struct work;
        bool flag;
};

static void do_enable_set(struct work_struct *work)
{
        struct set_enable *set_enable = container_of(work,
                                                     struct set_enable, work);

        if (!set_enable->flag || enable_6lowpan != set_enable->flag)
                /* Disconnect existing connections if 6lowpan is
                 * disabled
                 */
                disconnect_all_peers();

        enable_6lowpan = set_enable->flag;

        mutex_lock(&set_lock);
        if (listen_chan) {
                l2cap_chan_close(listen_chan, 0);
                l2cap_chan_put(listen_chan);
        }

        listen_chan = bt_6lowpan_listen();
        mutex_unlock(&set_lock);

        kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
        struct set_enable *set_enable;

        set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
        if (!set_enable)
                return -ENOMEM;

        set_enable->flag = !!val;
        INIT_WORK(&set_enable->work, do_enable_set);

        schedule_work(&set_enable->work);

        return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
        *val = enable_6lowpan;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
                         lowpan_enable_set, "%llu\n");

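/* The two debugfs files are created under the bluetooth directory (see
 * bt_6lowpan_init()). A minimal usage sketch, assuming debugfs is mounted
 * at /sys/kernel/debug:
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable
 *   echo "connect 00:11:22:33:44:55 1" > \
 *        /sys/kernel/debug/bluetooth/6lowpan_control
 *   echo "disconnect 00:11:22:33:44:55 1" > \
 *        /sys/kernel/debug/bluetooth/6lowpan_control
 *
 * The "<bdaddr> <addr type>" format matches the sscanf() in
 * get_l2cap_conn(); the example bdaddr above is a placeholder.
 */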
static ssize_t lowpan_control_write(struct file *fp,
                                    const char __user *user_buffer,
                                    size_t count,
                                    loff_t *position)
{
        char buf[32];
        size_t buf_size = min(count, sizeof(buf) - 1);
        int ret;
        bdaddr_t addr;
        u8 addr_type;
        struct l2cap_conn *conn = NULL;

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (memcmp(buf, "connect ", 8) == 0) {
                ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
                if (ret == -EINVAL)
                        return ret;

                mutex_lock(&set_lock);
                if (listen_chan) {
                        l2cap_chan_close(listen_chan, 0);
                        l2cap_chan_put(listen_chan);
                        listen_chan = NULL;
                }
                mutex_unlock(&set_lock);

                if (conn) {
                        struct lowpan_peer *peer;

                        if (!is_bt_6lowpan(conn->hcon))
                                return -EINVAL;

                        peer = lookup_peer(conn);
                        if (peer) {
                                BT_DBG("6LoWPAN connection already exists");
                                return -EALREADY;
                        }

                        BT_DBG("conn %p dst %pMR type %d user %u", conn,
                               &conn->hcon->dst, conn->hcon->dst_type,
                               addr_type);
                }

                ret = bt_6lowpan_connect(&addr, addr_type);
                if (ret < 0)
                        return ret;

                return count;
        }

        if (memcmp(buf, "disconnect ", 11) == 0) {
                ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
                if (ret < 0)
                        return ret;

                ret = bt_6lowpan_disconnect(conn, addr_type);
                if (ret < 0)
                        return ret;

                return count;
        }

        return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
        struct lowpan_btle_dev *entry;
        struct lowpan_peer *peer;

        spin_lock(&devices_lock);

        list_for_each_entry(entry, &bt_6lowpan_devices, list) {
                list_for_each_entry(peer, &entry->peers, list)
                        seq_printf(f, "%pMR (type %u)\n",
                                   &peer->chan->dst, peer->chan->dst_type);
        }

        spin_unlock(&devices_lock);

        return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
        return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
        .open           = lowpan_control_open,
        .read           = seq_read,
        .write          = lowpan_control_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void disconnect_devices(void)
{
        struct lowpan_btle_dev *entry, *tmp, *new_dev;
        struct list_head devices;

        INIT_LIST_HEAD(&devices);

        /* We make a separate list of devices because unregister_netdev()
         * will call device_event(), which will also want to modify the
         * same devices list.
         */

        rcu_read_lock();

        list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
                if (!new_dev)
                        break;

                new_dev->netdev = entry->netdev;
                INIT_LIST_HEAD(&new_dev->list);

                list_add_rcu(&new_dev->list, &devices);
        }

        rcu_read_unlock();

        list_for_each_entry_safe(entry, tmp, &devices, list) {
                ifdown(entry->netdev);
                BT_DBG("Unregistering netdev %s %p",
                       entry->netdev->name, entry->netdev);
                lowpan_unregister_netdev(entry->netdev);
                kfree(entry);
        }
}

static int device_event(struct notifier_block *unused,
                        unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct lowpan_btle_dev *entry;

        if (netdev->type != ARPHRD_6LOWPAN)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UNREGISTER:
                spin_lock(&devices_lock);
                list_for_each_entry(entry, &bt_6lowpan_devices, list) {
                        if (entry->netdev == netdev) {
                                BT_DBG("Unregistered netdev %s %p",
                                       netdev->name, netdev);
                                list_del(&entry->list);
                                break;
                        }
                }
                spin_unlock(&devices_lock);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
        .notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
        lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
                                                           0644, bt_debugfs,
                                                           NULL,
                                                           &lowpan_enable_fops);
        lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
                                                     bt_debugfs, NULL,
                                                     &lowpan_control_fops);

        return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
        debugfs_remove(lowpan_enable_debugfs);
        debugfs_remove(lowpan_control_debugfs);

        if (listen_chan) {
                l2cap_chan_close(listen_chan, 0);
                l2cap_chan_put(listen_chan);
        }

        disconnect_devices();

        unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");