linux/net/bluetooth/hci_sock.c
   1/*
   2   BlueZ - Bluetooth protocol stack for Linux
   3   Copyright (C) 2000-2001 Qualcomm Incorporated
   4
   5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
   6
   7   This program is free software; you can redistribute it and/or modify
   8   it under the terms of the GNU General Public License version 2 as
   9   published by the Free Software Foundation;
  10
  11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19
  20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  22   SOFTWARE IS DISCLAIMED.
  23*/
  24
  25/* Bluetooth HCI sockets. */
  26
  27#include <linux/export.h>
  28#include <linux/utsname.h>
  29#include <asm/unaligned.h>
  30
  31#include <net/bluetooth/bluetooth.h>
  32#include <net/bluetooth/hci_core.h>
  33#include <net/bluetooth/hci_mon.h>
  34#include <net/bluetooth/mgmt.h>
  35
  36#include "mgmt_util.h"
  37
  38static LIST_HEAD(mgmt_chan_list);
  39static DEFINE_MUTEX(mgmt_chan_list_lock);
  40
  41static atomic_t monitor_promisc = ATOMIC_INIT(0);
  42
  43/* ----- HCI socket interface ----- */
  44
  45/* Socket info */
  46#define hci_pi(sk) ((struct hci_pinfo *) sk)
  47
  48struct hci_pinfo {
  49        struct bt_sock    bt;
  50        struct hci_dev    *hdev;
  51        struct hci_filter filter;
  52        __u32             cmsg_mask;
  53        unsigned short    channel;
  54        unsigned long     flags;
  55};
  56
  57void hci_sock_set_flag(struct sock *sk, int nr)
  58{
  59        set_bit(nr, &hci_pi(sk)->flags);
  60}
  61
  62void hci_sock_clear_flag(struct sock *sk, int nr)
  63{
  64        clear_bit(nr, &hci_pi(sk)->flags);
  65}
  66
  67int hci_sock_test_flag(struct sock *sk, int nr)
  68{
  69        return test_bit(nr, &hci_pi(sk)->flags);
  70}
  71
  72unsigned short hci_sock_get_channel(struct sock *sk)
  73{
  74        return hci_pi(sk)->channel;
  75}
  76
  77static inline int hci_test_bit(int nr, const void *addr)
  78{
  79        return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
  80}
  81
  82/* Security filter */
  83#define HCI_SFLT_MAX_OGF  5
  84
  85struct hci_sec_filter {
  86        __u32 type_mask;
  87        __u32 event_mask[2];
  88        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
  89};
  90
  91static const struct hci_sec_filter hci_sec_filter = {
  92        /* Packet types */
  93        0x10,
  94        /* Events */
  95        { 0x1000d9fe, 0x0000b00c },
  96        /* Commands */
  97        {
  98                { 0x0 },
  99                /* OGF_LINK_CTL */
 100                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 101                /* OGF_LINK_POLICY */
 102                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
 103                /* OGF_HOST_CTL */
 104                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 105                /* OGF_INFO_PARAM */
 106                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
 107                /* OGF_STATUS_PARAM */
 108                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 109        }
 110};
 111
 112static struct bt_sock_list hci_sk_list = {
 113        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 114};
 115
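/* Check a packet against the filter installed on a RAW channel socket.
 * Returns true when the packet has to be dropped for this socket, i.e.
 * when its packet type or event code is not covered by the filter, or
 * when a command complete/status event does not match the requested
 * opcode.
 */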
 116static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
 117{
 118        struct hci_filter *flt;
 119        int flt_type, flt_event;
 120
 121        /* Apply filter */
 122        flt = &hci_pi(sk)->filter;
 123
 124        flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
 125
 126        if (!test_bit(flt_type, &flt->type_mask))
 127                return true;
 128
 129        /* Extra filter for event packets only */
 130        if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
 131                return false;
 132
 133        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 134
 135        if (!hci_test_bit(flt_event, &flt->event_mask))
 136                return true;
 137
 138        /* Check filter only when opcode is set */
 139        if (!flt->opcode)
 140                return false;
 141
 142        if (flt_event == HCI_EV_CMD_COMPLETE &&
 143            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
 144                return true;
 145
 146        if (flt_event == HCI_EV_CMD_STATUS &&
 147            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
 148                return true;
 149
 150        return false;
 151}
 152
 153/* Send frame to RAW socket */
 154void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 155{
 156        struct sock *sk;
 157        struct sk_buff *skb_copy = NULL;
 158
 159        BT_DBG("hdev %p len %d", hdev, skb->len);
 160
 161        read_lock(&hci_sk_list.lock);
 162
 163        sk_for_each(sk, &hci_sk_list.head) {
 164                struct sk_buff *nskb;
 165
 166                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 167                        continue;
 168
 169                /* Don't send frame to the socket it came from */
 170                if (skb->sk == sk)
 171                        continue;
 172
 173                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
 174                        if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
 175                            hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 176                            hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 177                            hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 178                                continue;
 179                        if (is_filtered_packet(sk, skb))
 180                                continue;
 181                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 182                        if (!bt_cb(skb)->incoming)
 183                                continue;
 184                        if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
 185                            hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
 186                            hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
 187                                continue;
 188                } else {
 189                        /* Don't send frame to other channel types */
 190                        continue;
 191                }
 192
 193                if (!skb_copy) {
 194                        /* Create a private copy with headroom */
 195                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 196                        if (!skb_copy)
 197                                continue;
 198
 199                        /* Put type byte before the data */
 200                        memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
 201                }
 202
 203                nskb = skb_clone(skb_copy, GFP_ATOMIC);
 204                if (!nskb)
 205                        continue;
 206
 207                if (sock_queue_rcv_skb(sk, nskb))
 208                        kfree_skb(nskb);
 209        }
 210
 211        read_unlock(&hci_sk_list.lock);
 212
 213        kfree_skb(skb_copy);
 214}
 215
 216/* Send frame to sockets with specific channel */
 217void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
 218                         int flag, struct sock *skip_sk)
 219{
 220        struct sock *sk;
 221
 222        BT_DBG("channel %u len %d", channel, skb->len);
 223
 224        read_lock(&hci_sk_list.lock);
 225
 226        sk_for_each(sk, &hci_sk_list.head) {
 227                struct sk_buff *nskb;
 228
 229                /* Ignore socket without the flag set */
 230                if (!hci_sock_test_flag(sk, flag))
 231                        continue;
 232
 233                /* Skip the original socket */
 234                if (sk == skip_sk)
 235                        continue;
 236
 237                if (sk->sk_state != BT_BOUND)
 238                        continue;
 239
 240                if (hci_pi(sk)->channel != channel)
 241                        continue;
 242
 243                nskb = skb_clone(skb, GFP_ATOMIC);
 244                if (!nskb)
 245                        continue;
 246
 247                if (sock_queue_rcv_skb(sk, nskb))
 248                        kfree_skb(nskb);
 249        }
 250
 251        read_unlock(&hci_sk_list.lock);
 252}
 253
 254/* Send frame to monitor socket */
 255void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 256{
 257        struct sk_buff *skb_copy = NULL;
 258        struct hci_mon_hdr *hdr;
 259        __le16 opcode;
 260
 261        if (!atomic_read(&monitor_promisc))
 262                return;
 263
 264        BT_DBG("hdev %p len %d", hdev, skb->len);
 265
 266        switch (hci_skb_pkt_type(skb)) {
 267        case HCI_COMMAND_PKT:
 268                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
 269                break;
 270        case HCI_EVENT_PKT:
 271                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
 272                break;
 273        case HCI_ACLDATA_PKT:
 274                if (bt_cb(skb)->incoming)
 275                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
 276                else
 277                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
 278                break;
 279        case HCI_SCODATA_PKT:
 280                if (bt_cb(skb)->incoming)
 281                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
 282                else
 283                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 284                break;
 285        case HCI_DIAG_PKT:
 286                opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
 287                break;
 288        default:
 289                return;
 290        }
 291
 292        /* Create a private copy with headroom */
 293        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
 294        if (!skb_copy)
 295                return;
 296
 297        /* Put header before the data */
 298        hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
 299        hdr->opcode = opcode;
 300        hdr->index = cpu_to_le16(hdev->id);
 301        hdr->len = cpu_to_le16(skb->len);
 302
 303        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
 304                            HCI_SOCK_TRUSTED, NULL);
 305        kfree_skb(skb_copy);
 306}
 307
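/* Build the monitor event corresponding to a controller state change
 * (register, unregister, open, close, setup complete or power up).
 * Returns NULL when no event applies or when allocation fails.
 */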
 308static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 309{
 310        struct hci_mon_hdr *hdr;
 311        struct hci_mon_new_index *ni;
 312        struct hci_mon_index_info *ii;
 313        struct sk_buff *skb;
 314        __le16 opcode;
 315
 316        switch (event) {
 317        case HCI_DEV_REG:
 318                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
 319                if (!skb)
 320                        return NULL;
 321
 322                ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 323                ni->type = hdev->dev_type;
 324                ni->bus = hdev->bus;
 325                bacpy(&ni->bdaddr, &hdev->bdaddr);
 326                memcpy(ni->name, hdev->name, 8);
 327
 328                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
 329                break;
 330
 331        case HCI_DEV_UNREG:
 332                skb = bt_skb_alloc(0, GFP_ATOMIC);
 333                if (!skb)
 334                        return NULL;
 335
 336                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 337                break;
 338
 339        case HCI_DEV_SETUP:
 340                if (hdev->manufacturer == 0xffff)
 341                        return NULL;
 342
 343                /* fall through */
 344
 345        case HCI_DEV_UP:
 346                skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
 347                if (!skb)
 348                        return NULL;
 349
 350                ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
 351                bacpy(&ii->bdaddr, &hdev->bdaddr);
 352                ii->manufacturer = cpu_to_le16(hdev->manufacturer);
 353
 354                opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
 355                break;
 356
 357        case HCI_DEV_OPEN:
 358                skb = bt_skb_alloc(0, GFP_ATOMIC);
 359                if (!skb)
 360                        return NULL;
 361
 362                opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
 363                break;
 364
 365        case HCI_DEV_CLOSE:
 366                skb = bt_skb_alloc(0, GFP_ATOMIC);
 367                if (!skb)
 368                        return NULL;
 369
 370                opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
 371                break;
 372
 373        default:
 374                return NULL;
 375        }
 376
 377        __net_timestamp(skb);
 378
 379        hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 380        hdr->opcode = opcode;
 381        hdr->index = cpu_to_le16(hdev->id);
 382        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 383
 384        return skb;
 385}
 386
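/* Queue a formatted system note (HCI_MON_SYSTEM_NOTE) to a monitor socket. */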
 387static void __printf(2, 3)
 388send_monitor_note(struct sock *sk, const char *fmt, ...)
 389{
 390        size_t len;
 391        struct hci_mon_hdr *hdr;
 392        struct sk_buff *skb;
 393        va_list args;
 394
 395        va_start(args, fmt);
 396        len = vsnprintf(NULL, 0, fmt, args);
 397        va_end(args);
 398
 399        skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
 400        if (!skb)
 401                return;
 402
 403        va_start(args, fmt);
 404        vsprintf(skb_put(skb, len), fmt, args);
 405        *skb_put(skb, 1) = 0;
 406        va_end(args);
 407
 408        __net_timestamp(skb);
 409
 410        hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
 411        hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
 412        hdr->index = cpu_to_le16(HCI_DEV_NONE);
 413        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
 414
 415        if (sock_queue_rcv_skb(sk, skb))
 416                kfree_skb(skb);
 417}
 418
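/* Replay the current state of all registered controllers (new index plus,
 * where applicable, open and setup/up events) to a newly bound monitor
 * socket.
 */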
 419static void send_monitor_replay(struct sock *sk)
 420{
 421        struct hci_dev *hdev;
 422
 423        read_lock(&hci_dev_list_lock);
 424
 425        list_for_each_entry(hdev, &hci_dev_list, list) {
 426                struct sk_buff *skb;
 427
 428                skb = create_monitor_event(hdev, HCI_DEV_REG);
 429                if (!skb)
 430                        continue;
 431
 432                if (sock_queue_rcv_skb(sk, skb))
 433                        kfree_skb(skb);
 434
 435                if (!test_bit(HCI_RUNNING, &hdev->flags))
 436                        continue;
 437
 438                skb = create_monitor_event(hdev, HCI_DEV_OPEN);
 439                if (!skb)
 440                        continue;
 441
 442                if (sock_queue_rcv_skb(sk, skb))
 443                        kfree_skb(skb);
 444
 445                if (test_bit(HCI_UP, &hdev->flags))
 446                        skb = create_monitor_event(hdev, HCI_DEV_UP);
 447                else if (hci_dev_test_flag(hdev, HCI_SETUP))
 448                        skb = create_monitor_event(hdev, HCI_DEV_SETUP);
 449                else
 450                        skb = NULL;
 451
 452                if (skb) {
 453                        if (sock_queue_rcv_skb(sk, skb))
 454                                kfree_skb(skb);
 455                }
 456        }
 457
 458        read_unlock(&hci_dev_list_lock);
 459}
 460
 461/* Generate internal stack event */
 462static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 463{
 464        struct hci_event_hdr *hdr;
 465        struct hci_ev_stack_internal *ev;
 466        struct sk_buff *skb;
 467
 468        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
 469        if (!skb)
 470                return;
 471
 472        hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
 473        hdr->evt  = HCI_EV_STACK_INTERNAL;
 474        hdr->plen = sizeof(*ev) + dlen;
 475
 476        ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
 477        ev->type = type;
 478        memcpy(ev->data, data, dlen);
 479
 480        bt_cb(skb)->incoming = 1;
 481        __net_timestamp(skb);
 482
 483        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 484        hci_send_to_sock(hdev, skb);
 485        kfree_skb(skb);
 486}
 487
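/* Forward a controller event to the monitor channel, generate the matching
 * stack-internal event where applicable and detach bound sockets when a
 * controller gets unregistered.
 */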
 488void hci_sock_dev_event(struct hci_dev *hdev, int event)
 489{
 490        BT_DBG("hdev %s event %d", hdev->name, event);
 491
 492        if (atomic_read(&monitor_promisc)) {
 493                struct sk_buff *skb;
 494
 495                /* Send event to monitor */
 496                skb = create_monitor_event(hdev, event);
 497                if (skb) {
 498                        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
 499                                            HCI_SOCK_TRUSTED, NULL);
 500                        kfree_skb(skb);
 501                }
 502        }
 503
 504        if (event <= HCI_DEV_DOWN) {
 505                struct hci_ev_si_device ev;
 506
 507                /* Send event to sockets */
 508                ev.event  = event;
 509                ev.dev_id = hdev->id;
 510                hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
 511        }
 512
 513        if (event == HCI_DEV_UNREG) {
 514                struct sock *sk;
 515
 516                /* Detach sockets from device */
 517                read_lock(&hci_sk_list.lock);
 518                sk_for_each(sk, &hci_sk_list.head) {
 519                        bh_lock_sock_nested(sk);
 520                        if (hci_pi(sk)->hdev == hdev) {
 521                                hci_pi(sk)->hdev = NULL;
 522                                sk->sk_err = EPIPE;
 523                                sk->sk_state = BT_OPEN;
 524                                sk->sk_state_change(sk);
 525
 526                                hci_dev_put(hdev);
 527                        }
 528                        bh_unlock_sock(sk);
 529                }
 530                read_unlock(&hci_sk_list.lock);
 531        }
 532}
 533
 534static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
 535{
 536        struct hci_mgmt_chan *c;
 537
 538        list_for_each_entry(c, &mgmt_chan_list, list) {
 539                if (c->channel == channel)
 540                        return c;
 541        }
 542
 543        return NULL;
 544}
 545
 546static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
 547{
 548        struct hci_mgmt_chan *c;
 549
 550        mutex_lock(&mgmt_chan_list_lock);
 551        c = __hci_mgmt_chan_find(channel);
 552        mutex_unlock(&mgmt_chan_list_lock);
 553
 554        return c;
 555}
 556
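/* Register a management channel (HCI_CHANNEL_CONTROL or above) so that
 * sockets can bind to it; fails if the channel number is already taken.
 */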
 557int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
 558{
 559        if (c->channel < HCI_CHANNEL_CONTROL)
 560                return -EINVAL;
 561
 562        mutex_lock(&mgmt_chan_list_lock);
 563        if (__hci_mgmt_chan_find(c->channel)) {
 564                mutex_unlock(&mgmt_chan_list_lock);
 565                return -EALREADY;
 566        }
 567
 568        list_add_tail(&c->list, &mgmt_chan_list);
 569
 570        mutex_unlock(&mgmt_chan_list_lock);
 571
 572        return 0;
 573}
 574EXPORT_SYMBOL(hci_mgmt_chan_register);
 575
 576void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
 577{
 578        mutex_lock(&mgmt_chan_list_lock);
 579        list_del(&c->list);
 580        mutex_unlock(&mgmt_chan_list_lock);
 581}
 582EXPORT_SYMBOL(hci_mgmt_chan_unregister);
 583
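/* Release an HCI socket: drop monitor promiscuity if needed, give up user
 * channel exclusive access and release the controller reference, if any.
 */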
 584static int hci_sock_release(struct socket *sock)
 585{
 586        struct sock *sk = sock->sk;
 587        struct hci_dev *hdev;
 588
 589        BT_DBG("sock %p sk %p", sock, sk);
 590
 591        if (!sk)
 592                return 0;
 593
 594        hdev = hci_pi(sk)->hdev;
 595
 596        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
 597                atomic_dec(&monitor_promisc);
 598
 599        bt_sock_unlink(&hci_sk_list, sk);
 600
 601        if (hdev) {
 602                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
  603                        /* When releasing a user channel exclusive access,
 604                         * call hci_dev_do_close directly instead of calling
 605                         * hci_dev_close to ensure the exclusive access will
 606                         * be released and the controller brought back down.
 607                         *
 608                         * The checking of HCI_AUTO_OFF is not needed in this
 609                         * case since it will have been cleared already when
 610                         * opening the user channel.
 611                         */
 612                        hci_dev_do_close(hdev);
 613                        hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 614                        mgmt_index_added(hdev);
 615                }
 616
 617                atomic_dec(&hdev->promisc);
 618                hci_dev_put(hdev);
 619        }
 620
 621        sock_orphan(sk);
 622
 623        skb_queue_purge(&sk->sk_receive_queue);
 624        skb_queue_purge(&sk->sk_write_queue);
 625
 626        sock_put(sk);
 627        return 0;
 628}
 629
 630static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 631{
 632        bdaddr_t bdaddr;
 633        int err;
 634
 635        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 636                return -EFAULT;
 637
 638        hci_dev_lock(hdev);
 639
 640        err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 641
 642        hci_dev_unlock(hdev);
 643
 644        return err;
 645}
 646
 647static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 648{
 649        bdaddr_t bdaddr;
 650        int err;
 651
 652        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 653                return -EFAULT;
 654
 655        hci_dev_lock(hdev);
 656
 657        err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 658
 659        hci_dev_unlock(hdev);
 660
 661        return err;
 662}
 663
 664/* Ioctls that require bound socket */
 665static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 666                                unsigned long arg)
 667{
 668        struct hci_dev *hdev = hci_pi(sk)->hdev;
 669
 670        if (!hdev)
 671                return -EBADFD;
 672
 673        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 674                return -EBUSY;
 675
 676        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 677                return -EOPNOTSUPP;
 678
 679        if (hdev->dev_type != HCI_BREDR)
 680                return -EOPNOTSUPP;
 681
 682        switch (cmd) {
 683        case HCISETRAW:
 684                if (!capable(CAP_NET_ADMIN))
 685                        return -EPERM;
 686                return -EOPNOTSUPP;
 687
 688        case HCIGETCONNINFO:
 689                return hci_get_conn_info(hdev, (void __user *)arg);
 690
 691        case HCIGETAUTHINFO:
 692                return hci_get_auth_info(hdev, (void __user *)arg);
 693
 694        case HCIBLOCKADDR:
 695                if (!capable(CAP_NET_ADMIN))
 696                        return -EPERM;
 697                return hci_sock_blacklist_add(hdev, (void __user *)arg);
 698
 699        case HCIUNBLOCKADDR:
 700                if (!capable(CAP_NET_ADMIN))
 701                        return -EPERM;
 702                return hci_sock_blacklist_del(hdev, (void __user *)arg);
 703        }
 704
 705        return -ENOIOCTLCMD;
 706}
 707
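/* Ioctls that do not require a bound socket are handled here directly;
 * everything else is passed on to hci_sock_bound_ioctl().
 */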
 708static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 709                          unsigned long arg)
 710{
 711        void __user *argp = (void __user *)arg;
 712        struct sock *sk = sock->sk;
 713        int err;
 714
 715        BT_DBG("cmd %x arg %lx", cmd, arg);
 716
 717        lock_sock(sk);
 718
 719        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
 720                err = -EBADFD;
 721                goto done;
 722        }
 723
 724        release_sock(sk);
 725
 726        switch (cmd) {
 727        case HCIGETDEVLIST:
 728                return hci_get_dev_list(argp);
 729
 730        case HCIGETDEVINFO:
 731                return hci_get_dev_info(argp);
 732
 733        case HCIGETCONNLIST:
 734                return hci_get_conn_list(argp);
 735
 736        case HCIDEVUP:
 737                if (!capable(CAP_NET_ADMIN))
 738                        return -EPERM;
 739                return hci_dev_open(arg);
 740
 741        case HCIDEVDOWN:
 742                if (!capable(CAP_NET_ADMIN))
 743                        return -EPERM;
 744                return hci_dev_close(arg);
 745
 746        case HCIDEVRESET:
 747                if (!capable(CAP_NET_ADMIN))
 748                        return -EPERM;
 749                return hci_dev_reset(arg);
 750
 751        case HCIDEVRESTAT:
 752                if (!capable(CAP_NET_ADMIN))
 753                        return -EPERM;
 754                return hci_dev_reset_stat(arg);
 755
 756        case HCISETSCAN:
 757        case HCISETAUTH:
 758        case HCISETENCRYPT:
 759        case HCISETPTYPE:
 760        case HCISETLINKPOL:
 761        case HCISETLINKMODE:
 762        case HCISETACLMTU:
 763        case HCISETSCOMTU:
 764                if (!capable(CAP_NET_ADMIN))
 765                        return -EPERM;
 766                return hci_dev_cmd(cmd, argp);
 767
 768        case HCIINQUIRY:
 769                return hci_inquiry(argp);
 770        }
 771
 772        lock_sock(sk);
 773
 774        err = hci_sock_bound_ioctl(sk, cmd, arg);
 775
 776done:
 777        release_sock(sk);
 778        return err;
 779}
 780
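/* Bind the socket to one of the HCI channels (raw, user, monitor, logging
 * or a registered management channel) and, where applicable, to a
 * controller index.
 *
 * For illustration only, a minimal userspace sketch for binding a monitor
 * socket (assumes the usual definitions of sockaddr_hci and the
 * HCI_CHANNEL_* constants from the Bluetooth headers, requires
 * CAP_NET_RAW, error handling omitted):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 */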
 781static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 782                         int addr_len)
 783{
 784        struct sockaddr_hci haddr;
 785        struct sock *sk = sock->sk;
 786        struct hci_dev *hdev = NULL;
 787        int len, err = 0;
 788
 789        BT_DBG("sock %p sk %p", sock, sk);
 790
 791        if (!addr)
 792                return -EINVAL;
 793
 794        memset(&haddr, 0, sizeof(haddr));
 795        len = min_t(unsigned int, sizeof(haddr), addr_len);
 796        memcpy(&haddr, addr, len);
 797
 798        if (haddr.hci_family != AF_BLUETOOTH)
 799                return -EINVAL;
 800
 801        lock_sock(sk);
 802
 803        if (sk->sk_state == BT_BOUND) {
 804                err = -EALREADY;
 805                goto done;
 806        }
 807
 808        switch (haddr.hci_channel) {
 809        case HCI_CHANNEL_RAW:
 810                if (hci_pi(sk)->hdev) {
 811                        err = -EALREADY;
 812                        goto done;
 813                }
 814
 815                if (haddr.hci_dev != HCI_DEV_NONE) {
 816                        hdev = hci_dev_get(haddr.hci_dev);
 817                        if (!hdev) {
 818                                err = -ENODEV;
 819                                goto done;
 820                        }
 821
 822                        atomic_inc(&hdev->promisc);
 823                }
 824
 825                hci_pi(sk)->hdev = hdev;
 826                break;
 827
 828        case HCI_CHANNEL_USER:
 829                if (hci_pi(sk)->hdev) {
 830                        err = -EALREADY;
 831                        goto done;
 832                }
 833
 834                if (haddr.hci_dev == HCI_DEV_NONE) {
 835                        err = -EINVAL;
 836                        goto done;
 837                }
 838
 839                if (!capable(CAP_NET_ADMIN)) {
 840                        err = -EPERM;
 841                        goto done;
 842                }
 843
 844                hdev = hci_dev_get(haddr.hci_dev);
 845                if (!hdev) {
 846                        err = -ENODEV;
 847                        goto done;
 848                }
 849
 850                if (test_bit(HCI_INIT, &hdev->flags) ||
 851                    hci_dev_test_flag(hdev, HCI_SETUP) ||
 852                    hci_dev_test_flag(hdev, HCI_CONFIG) ||
 853                    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
 854                     test_bit(HCI_UP, &hdev->flags))) {
 855                        err = -EBUSY;
 856                        hci_dev_put(hdev);
 857                        goto done;
 858                }
 859
 860                if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
 861                        err = -EUSERS;
 862                        hci_dev_put(hdev);
 863                        goto done;
 864                }
 865
 866                mgmt_index_removed(hdev);
 867
 868                err = hci_dev_open(hdev->id);
 869                if (err) {
 870                        if (err == -EALREADY) {
 871                                /* In case the transport is already up and
 872                                 * running, clear the error here.
 873                                 *
  874                                 * This can happen when opening a user
  875                                 * channel and the HCI_AUTO_OFF grace period
 876                                 * is still active.
 877                                 */
 878                                err = 0;
 879                        } else {
 880                                hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 881                                mgmt_index_added(hdev);
 882                                hci_dev_put(hdev);
 883                                goto done;
 884                        }
 885                }
 886
 887                atomic_inc(&hdev->promisc);
 888
 889                hci_pi(sk)->hdev = hdev;
 890                break;
 891
 892        case HCI_CHANNEL_MONITOR:
 893                if (haddr.hci_dev != HCI_DEV_NONE) {
 894                        err = -EINVAL;
 895                        goto done;
 896                }
 897
 898                if (!capable(CAP_NET_RAW)) {
 899                        err = -EPERM;
 900                        goto done;
 901                }
 902
 903                /* The monitor interface is restricted to CAP_NET_RAW
  904                 * capabilities and is therefore implicitly trusted.
 905                 */
 906                hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 907
 908                send_monitor_note(sk, "Linux version %s (%s)",
 909                                  init_utsname()->release,
 910                                  init_utsname()->machine);
 911                send_monitor_note(sk, "Bluetooth subsystem version %s",
 912                                  BT_SUBSYS_VERSION);
 913                send_monitor_replay(sk);
 914
 915                atomic_inc(&monitor_promisc);
 916                break;
 917
 918        case HCI_CHANNEL_LOGGING:
 919                if (haddr.hci_dev != HCI_DEV_NONE) {
 920                        err = -EINVAL;
 921                        goto done;
 922                }
 923
 924                if (!capable(CAP_NET_ADMIN)) {
 925                        err = -EPERM;
 926                        goto done;
 927                }
 928                break;
 929
 930        default:
 931                if (!hci_mgmt_chan_find(haddr.hci_channel)) {
 932                        err = -EINVAL;
 933                        goto done;
 934                }
 935
 936                if (haddr.hci_dev != HCI_DEV_NONE) {
 937                        err = -EINVAL;
 938                        goto done;
 939                }
 940
 941                /* Users with CAP_NET_ADMIN capabilities are allowed
 942                 * access to all management commands and events. For
 943                 * untrusted users the interface is restricted and
  944                 * only events marked as untrusted are sent.
 945                 */
 946                if (capable(CAP_NET_ADMIN))
 947                        hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 948
 949                /* At the moment the index and unconfigured index events
 950                 * are enabled unconditionally. Setting them on each
  951                 * socket when binding keeps this functionality. They may,
  952                 * however, be cleared later, in which case sending of these
  953                 * events is disabled; that is then intentional.
 954                 *
 955                 * This also enables generic events that are safe to be
 956                 * received by untrusted users. Example for such events
 957                 * are changes to settings, class of device, name etc.
 958                 */
 959                if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
 960                        hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
 961                        hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
 962                        hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
 963                }
 964                break;
 965        }
 966
 967
 968        hci_pi(sk)->channel = haddr.hci_channel;
 969        sk->sk_state = BT_BOUND;
 970
 971done:
 972        release_sock(sk);
 973        return err;
 974}
 975
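/* Report the controller index and channel this socket is bound to; fails
 * with EBADFD when the socket is not bound to a controller.
 */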
 976static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 977                            int *addr_len, int peer)
 978{
 979        struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
 980        struct sock *sk = sock->sk;
 981        struct hci_dev *hdev;
 982        int err = 0;
 983
 984        BT_DBG("sock %p sk %p", sock, sk);
 985
 986        if (peer)
 987                return -EOPNOTSUPP;
 988
 989        lock_sock(sk);
 990
 991        hdev = hci_pi(sk)->hdev;
 992        if (!hdev) {
 993                err = -EBADFD;
 994                goto done;
 995        }
 996
 997        *addr_len = sizeof(*haddr);
 998        haddr->hci_family = AF_BLUETOOTH;
 999        haddr->hci_dev    = hdev->id;
  1000        haddr->hci_channel = hci_pi(sk)->channel;
1001
1002done:
1003        release_sock(sk);
1004        return err;
1005}
1006
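/* Attach the requested ancillary data (packet direction and/or receive
 * timestamp) to a message delivered on a RAW channel socket.
 */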
1007static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1008                          struct sk_buff *skb)
1009{
1010        __u32 mask = hci_pi(sk)->cmsg_mask;
1011
1012        if (mask & HCI_CMSG_DIR) {
1013                int incoming = bt_cb(skb)->incoming;
1014                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1015                         &incoming);
1016        }
1017
1018        if (mask & HCI_CMSG_TSTAMP) {
1019#ifdef CONFIG_COMPAT
1020                struct compat_timeval ctv;
1021#endif
1022                struct timeval tv;
1023                void *data;
1024                int len;
1025
1026                skb_get_timestamp(skb, &tv);
1027
1028                data = &tv;
1029                len = sizeof(tv);
1030#ifdef CONFIG_COMPAT
1031                if (!COMPAT_USE_64BIT_TIME &&
1032                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1033                        ctv.tv_sec = tv.tv_sec;
1034                        ctv.tv_usec = tv.tv_usec;
1035                        data = &ctv;
1036                        len = sizeof(ctv);
1037                }
1038#endif
1039
1040                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1041        }
1042}
1043
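/* Receive a single queued frame. RAW sockets get the requested control
 * messages attached, other channels a receive timestamp; the logging
 * channel does not support receiving at all.
 */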
1044static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1045                            size_t len, int flags)
1046{
1047        int noblock = flags & MSG_DONTWAIT;
1048        struct sock *sk = sock->sk;
1049        struct sk_buff *skb;
1050        int copied, err;
1051
1052        BT_DBG("sock %p, sk %p", sock, sk);
1053
1054        if (flags & MSG_OOB)
1055                return -EOPNOTSUPP;
1056
1057        if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1058                return -EOPNOTSUPP;
1059
1060        if (sk->sk_state == BT_CLOSED)
1061                return 0;
1062
1063        skb = skb_recv_datagram(sk, flags, noblock, &err);
1064        if (!skb)
1065                return err;
1066
1067        copied = skb->len;
1068        if (len < copied) {
1069                msg->msg_flags |= MSG_TRUNC;
1070                copied = len;
1071        }
1072
1073        skb_reset_transport_header(skb);
1074        err = skb_copy_datagram_msg(skb, 0, msg, copied);
1075
1076        switch (hci_pi(sk)->channel) {
1077        case HCI_CHANNEL_RAW:
1078                hci_sock_cmsg(sk, msg, skb);
1079                break;
1080        case HCI_CHANNEL_USER:
1081        case HCI_CHANNEL_MONITOR:
1082                sock_recv_timestamp(msg, sk, skb);
1083                break;
1084        default:
1085                if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1086                        sock_recv_timestamp(msg, sk, skb);
1087                break;
1088        }
1089
1090        skb_free_datagram(sk, skb);
1091
1092        return err ? : copied;
1093}
1094
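/* Parse a management command header, validate index, permissions and
 * parameter length and dispatch the command to the handler registered
 * for the bound management channel.
 */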
1095static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1096                        struct msghdr *msg, size_t msglen)
1097{
1098        void *buf;
1099        u8 *cp;
1100        struct mgmt_hdr *hdr;
1101        u16 opcode, index, len;
1102        struct hci_dev *hdev = NULL;
1103        const struct hci_mgmt_handler *handler;
1104        bool var_len, no_hdev;
1105        int err;
1106
1107        BT_DBG("got %zu bytes", msglen);
1108
1109        if (msglen < sizeof(*hdr))
1110                return -EINVAL;
1111
1112        buf = kmalloc(msglen, GFP_KERNEL);
1113        if (!buf)
1114                return -ENOMEM;
1115
1116        if (memcpy_from_msg(buf, msg, msglen)) {
1117                err = -EFAULT;
1118                goto done;
1119        }
1120
1121        hdr = buf;
1122        opcode = __le16_to_cpu(hdr->opcode);
1123        index = __le16_to_cpu(hdr->index);
1124        len = __le16_to_cpu(hdr->len);
1125
1126        if (len != msglen - sizeof(*hdr)) {
1127                err = -EINVAL;
1128                goto done;
1129        }
1130
1131        if (opcode >= chan->handler_count ||
1132            chan->handlers[opcode].func == NULL) {
1133                BT_DBG("Unknown op %u", opcode);
1134                err = mgmt_cmd_status(sk, index, opcode,
1135                                      MGMT_STATUS_UNKNOWN_COMMAND);
1136                goto done;
1137        }
1138
1139        handler = &chan->handlers[opcode];
1140
1141        if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1142            !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1143                err = mgmt_cmd_status(sk, index, opcode,
1144                                      MGMT_STATUS_PERMISSION_DENIED);
1145                goto done;
1146        }
1147
1148        if (index != MGMT_INDEX_NONE) {
1149                hdev = hci_dev_get(index);
1150                if (!hdev) {
1151                        err = mgmt_cmd_status(sk, index, opcode,
1152                                              MGMT_STATUS_INVALID_INDEX);
1153                        goto done;
1154                }
1155
1156                if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1157                    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1158                    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1159                        err = mgmt_cmd_status(sk, index, opcode,
1160                                              MGMT_STATUS_INVALID_INDEX);
1161                        goto done;
1162                }
1163
1164                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1165                    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1166                        err = mgmt_cmd_status(sk, index, opcode,
1167                                              MGMT_STATUS_INVALID_INDEX);
1168                        goto done;
1169                }
1170        }
1171
1172        no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1173        if (no_hdev != !hdev) {
1174                err = mgmt_cmd_status(sk, index, opcode,
1175                                      MGMT_STATUS_INVALID_INDEX);
1176                goto done;
1177        }
1178
1179        var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1180        if ((var_len && len < handler->data_len) ||
1181            (!var_len && len != handler->data_len)) {
1182                err = mgmt_cmd_status(sk, index, opcode,
1183                                      MGMT_STATUS_INVALID_PARAMS);
1184                goto done;
1185        }
1186
1187        if (hdev && chan->hdev_init)
1188                chan->hdev_init(sk, hdev);
1189
1190        cp = buf + sizeof(*hdr);
1191
1192        err = handler->func(sk, hdev, cp, len);
1193        if (err < 0)
1194                goto done;
1195
1196        err = msglen;
1197
1198done:
1199        if (hdev)
1200                hci_dev_put(hdev);
1201
1202        kfree(buf);
1203        return err;
1204}
1205
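/* Validate a user logging frame (priority, ident string and NUL terminated
 * message) and forward it to the monitor channel as HCI_MON_USER_LOGGING.
 */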
1206static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1207{
1208        struct hci_mon_hdr *hdr;
1209        struct sk_buff *skb;
1210        struct hci_dev *hdev;
1211        u16 index;
1212        int err;
1213
1214        /* The logging frame consists at minimum of the standard header,
1215         * the priority byte, the ident length byte and at least one string
  1216         * terminator NUL byte. Anything shorter is an invalid packet.
1217         */
1218        if (len < sizeof(*hdr) + 3)
1219                return -EINVAL;
1220
1221        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1222        if (!skb)
1223                return err;
1224
1225        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1226                err = -EFAULT;
1227                goto drop;
1228        }
1229
1230        hdr = (void *)skb->data;
1231
1232        if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1233                err = -EINVAL;
1234                goto drop;
1235        }
1236
1237        if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1238                __u8 priority = skb->data[sizeof(*hdr)];
1239                __u8 ident_len = skb->data[sizeof(*hdr) + 1];
1240
  1241                /* Only the priorities 0-7 are valid; any other
  1242                 * value results in an invalid packet.
1243                 *
1244                 * The priority byte is followed by an ident length byte and
1245                 * the NUL terminated ident string. Check that the ident
1246                 * length is not overflowing the packet and also that the
1247                 * ident string itself is NUL terminated. In case the ident
1248                 * length is zero, the length value actually doubles as NUL
1249                 * terminator identifier.
1250                 *
1251                 * The message follows the ident string (if present) and
1252                 * must be NUL terminated. Otherwise it is not a valid packet.
1253                 */
1254                if (priority > 7 || skb->data[len - 1] != 0x00 ||
1255                    ident_len > len - sizeof(*hdr) - 3 ||
1256                    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1257                        err = -EINVAL;
1258                        goto drop;
1259                }
1260        } else {
1261                err = -EINVAL;
1262                goto drop;
1263        }
1264
1265        index = __le16_to_cpu(hdr->index);
1266
1267        if (index != MGMT_INDEX_NONE) {
1268                hdev = hci_dev_get(index);
1269                if (!hdev) {
1270                        err = -ENODEV;
1271                        goto drop;
1272                }
1273        } else {
1274                hdev = NULL;
1275        }
1276
1277        hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1278
1279        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1280        err = len;
1281
1282        if (hdev)
1283                hci_dev_put(hdev);
1284
1285drop:
1286        kfree_skb(skb);
1287        return err;
1288}
1289
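/* Transmit path: management and logging channels are handled separately;
 * raw and user channel frames are queued towards the controller after the
 * packet type and, for raw commands, the security filter have been checked.
 */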
1290static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1291                            size_t len)
1292{
1293        struct sock *sk = sock->sk;
1294        struct hci_mgmt_chan *chan;
1295        struct hci_dev *hdev;
1296        struct sk_buff *skb;
1297        int err;
1298
1299        BT_DBG("sock %p sk %p", sock, sk);
1300
1301        if (msg->msg_flags & MSG_OOB)
1302                return -EOPNOTSUPP;
1303
1304        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1305                return -EINVAL;
1306
1307        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1308                return -EINVAL;
1309
1310        lock_sock(sk);
1311
1312        switch (hci_pi(sk)->channel) {
1313        case HCI_CHANNEL_RAW:
1314        case HCI_CHANNEL_USER:
1315                break;
1316        case HCI_CHANNEL_MONITOR:
1317                err = -EOPNOTSUPP;
1318                goto done;
1319        case HCI_CHANNEL_LOGGING:
1320                err = hci_logging_frame(sk, msg, len);
1321                goto done;
1322        default:
1323                mutex_lock(&mgmt_chan_list_lock);
1324                chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1325                if (chan)
1326                        err = hci_mgmt_cmd(chan, sk, msg, len);
1327                else
1328                        err = -EINVAL;
1329
1330                mutex_unlock(&mgmt_chan_list_lock);
1331                goto done;
1332        }
1333
1334        hdev = hci_pi(sk)->hdev;
1335        if (!hdev) {
1336                err = -EBADFD;
1337                goto done;
1338        }
1339
1340        if (!test_bit(HCI_UP, &hdev->flags)) {
1341                err = -ENETDOWN;
1342                goto done;
1343        }
1344
1345        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1346        if (!skb)
1347                goto done;
1348
1349        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1350                err = -EFAULT;
1351                goto drop;
1352        }
1353
1354        hci_skb_pkt_type(skb) = skb->data[0];
1355        skb_pull(skb, 1);
1356
1357        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
  1358                /* No permission check is needed for the user channel
  1359                 * since that gets enforced when binding the socket.
  1360                 *
  1361                 * However, check that the packet type is valid.
1362                 */
1363                if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1364                    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1365                    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1366                        err = -EINVAL;
1367                        goto drop;
1368                }
1369
1370                skb_queue_tail(&hdev->raw_q, skb);
1371                queue_work(hdev->workqueue, &hdev->tx_work);
1372        } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1373                u16 opcode = get_unaligned_le16(skb->data);
1374                u16 ogf = hci_opcode_ogf(opcode);
1375                u16 ocf = hci_opcode_ocf(opcode);
1376
1377                if (((ogf > HCI_SFLT_MAX_OGF) ||
1378                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1379                                   &hci_sec_filter.ocf_mask[ogf])) &&
1380                    !capable(CAP_NET_RAW)) {
1381                        err = -EPERM;
1382                        goto drop;
1383                }
1384
1385                /* Since the opcode has already been extracted here, store
1386                 * a copy of the value for later use by the drivers.
1387                 */
1388                hci_skb_opcode(skb) = opcode;
1389
1390                if (ogf == 0x3f) {
1391                        skb_queue_tail(&hdev->raw_q, skb);
1392                        queue_work(hdev->workqueue, &hdev->tx_work);
1393                } else {
1394                        /* Stand-alone HCI commands must be flagged as
1395                         * single-command requests.
1396                         */
1397                        bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1398
1399                        skb_queue_tail(&hdev->cmd_q, skb);
1400                        queue_work(hdev->workqueue, &hdev->cmd_work);
1401                }
1402        } else {
1403                if (!capable(CAP_NET_RAW)) {
1404                        err = -EPERM;
1405                        goto drop;
1406                }
1407
1408                if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1409                    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1410                        err = -EINVAL;
1411                        goto drop;
1412                }
1413
1414                skb_queue_tail(&hdev->raw_q, skb);
1415                queue_work(hdev->workqueue, &hdev->tx_work);
1416        }
1417
1418        err = len;
1419
1420done:
1421        release_sock(sk);
1422        return err;
1423
1424drop:
1425        kfree_skb(skb);
1426        goto done;
1427}
1428
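/* Socket options are only accepted on the raw channel. Unprivileged users
 * can only restrict the filter within the bounds of the security filter.
 */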
1429static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1430                               char __user *optval, unsigned int len)
1431{
1432        struct hci_ufilter uf = { .opcode = 0 };
1433        struct sock *sk = sock->sk;
1434        int err = 0, opt = 0;
1435
1436        BT_DBG("sk %p, opt %d", sk, optname);
1437
1438        lock_sock(sk);
1439
1440        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1441                err = -EBADFD;
1442                goto done;
1443        }
1444
1445        switch (optname) {
1446        case HCI_DATA_DIR:
1447                if (get_user(opt, (int __user *)optval)) {
1448                        err = -EFAULT;
1449                        break;
1450                }
1451
1452                if (opt)
1453                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1454                else
1455                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1456                break;
1457
1458        case HCI_TIME_STAMP:
1459                if (get_user(opt, (int __user *)optval)) {
1460                        err = -EFAULT;
1461                        break;
1462                }
1463
1464                if (opt)
1465                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1466                else
1467                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1468                break;
1469
1470        case HCI_FILTER:
1471                {
1472                        struct hci_filter *f = &hci_pi(sk)->filter;
1473
1474                        uf.type_mask = f->type_mask;
1475                        uf.opcode    = f->opcode;
1476                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1477                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1478                }
1479
1480                len = min_t(unsigned int, len, sizeof(uf));
1481                if (copy_from_user(&uf, optval, len)) {
1482                        err = -EFAULT;
1483                        break;
1484                }
1485
1486                if (!capable(CAP_NET_RAW)) {
1487                        uf.type_mask &= hci_sec_filter.type_mask;
1488                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1489                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1490                }
1491
1492                {
1493                        struct hci_filter *f = &hci_pi(sk)->filter;
1494
1495                        f->type_mask = uf.type_mask;
1496                        f->opcode    = uf.opcode;
1497                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1498                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1499                }
1500                break;
1501
1502        default:
1503                err = -ENOPROTOOPT;
1504                break;
1505        }
1506
1507done:
1508        release_sock(sk);
1509        return err;
1510}
1511
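/* Return the current data direction and timestamp settings or the
 * installed filter of a raw channel socket.
 */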
1512static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1513                               char __user *optval, int __user *optlen)
1514{
1515        struct hci_ufilter uf;
1516        struct sock *sk = sock->sk;
1517        int len, opt, err = 0;
1518
1519        BT_DBG("sk %p, opt %d", sk, optname);
1520
1521        if (get_user(len, optlen))
1522                return -EFAULT;
1523
1524        lock_sock(sk);
1525
1526        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1527                err = -EBADFD;
1528                goto done;
1529        }
1530
1531        switch (optname) {
1532        case HCI_DATA_DIR:
1533                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1534                        opt = 1;
1535                else
1536                        opt = 0;
1537
1538                if (put_user(opt, optval))
1539                        err = -EFAULT;
1540                break;
1541
1542        case HCI_TIME_STAMP:
1543                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1544                        opt = 1;
1545                else
1546                        opt = 0;
1547
1548                if (put_user(opt, optval))
1549                        err = -EFAULT;
1550                break;
1551
1552        case HCI_FILTER:
1553                {
1554                        struct hci_filter *f = &hci_pi(sk)->filter;
1555
1556                        memset(&uf, 0, sizeof(uf));
1557                        uf.type_mask = f->type_mask;
1558                        uf.opcode    = f->opcode;
1559                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1560                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1561                }
1562
1563                len = min_t(unsigned int, len, sizeof(uf));
1564                if (copy_to_user(optval, &uf, len))
1565                        err = -EFAULT;
1566                break;
1567
1568        default:
1569                err = -ENOPROTOOPT;
1570                break;
1571        }
1572
1573done:
1574        release_sock(sk);
1575        return err;
1576}
1577
1578static const struct proto_ops hci_sock_ops = {
1579        .family         = PF_BLUETOOTH,
1580        .owner          = THIS_MODULE,
1581        .release        = hci_sock_release,
1582        .bind           = hci_sock_bind,
1583        .getname        = hci_sock_getname,
1584        .sendmsg        = hci_sock_sendmsg,
1585        .recvmsg        = hci_sock_recvmsg,
1586        .ioctl          = hci_sock_ioctl,
1587        .poll           = datagram_poll,
1588        .listen         = sock_no_listen,
1589        .shutdown       = sock_no_shutdown,
1590        .setsockopt     = hci_sock_setsockopt,
1591        .getsockopt     = hci_sock_getsockopt,
1592        .connect        = sock_no_connect,
1593        .socketpair     = sock_no_socketpair,
1594        .accept         = sock_no_accept,
1595        .mmap           = sock_no_mmap
1596};
1597
1598static struct proto hci_sk_proto = {
1599        .name           = "HCI",
1600        .owner          = THIS_MODULE,
1601        .obj_size       = sizeof(struct hci_pinfo)
1602};
1603
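/* Create a new HCI socket; only SOCK_RAW sockets are supported. */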
1604static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1605                           int kern)
1606{
1607        struct sock *sk;
1608
1609        BT_DBG("sock %p", sock);
1610
1611        if (sock->type != SOCK_RAW)
1612                return -ESOCKTNOSUPPORT;
1613
1614        sock->ops = &hci_sock_ops;
1615
1616        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1617        if (!sk)
1618                return -ENOMEM;
1619
1620        sock_init_data(sock, sk);
1621
1622        sock_reset_flag(sk, SOCK_ZAPPED);
1623
1624        sk->sk_protocol = protocol;
1625
1626        sock->state = SS_UNCONNECTED;
1627        sk->sk_state = BT_OPEN;
1628
1629        bt_sock_link(&hci_sk_list, sk);
1630        return 0;
1631}
1632
1633static const struct net_proto_family hci_sock_family_ops = {
1634        .family = PF_BLUETOOTH,
1635        .owner  = THIS_MODULE,
1636        .create = hci_sock_create,
1637};
1638
1639int __init hci_sock_init(void)
1640{
1641        int err;
1642
1643        BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1644
1645        err = proto_register(&hci_sk_proto, 0);
1646        if (err < 0)
1647                return err;
1648
1649        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1650        if (err < 0) {
1651                BT_ERR("HCI socket registration failed");
1652                goto error;
1653        }
1654
1655        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1656        if (err < 0) {
1657                BT_ERR("Failed to create HCI proc file");
1658                bt_sock_unregister(BTPROTO_HCI);
1659                goto error;
1660        }
1661
1662        BT_INFO("HCI socket layer initialized");
1663
1664        return 0;
1665
1666error:
1667        proto_unregister(&hci_sk_proto);
1668        return err;
1669}
1670
1671void hci_sock_cleanup(void)
1672{
1673        bt_procfs_cleanup(&init_net, "hci");
1674        bt_sock_unregister(BTPROTO_HCI);
1675        proto_unregister(&hci_sk_proto);
1676}
1677