linux/net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
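
/* Exposed as a debugfs attribute; with debugfs mounted at the usual
 * /sys/kernel/debug location this can be exercised from a shell
 * (illustrative sketch, hci0 assumed):
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */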

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, there is no need for the vendor callback.
         *
         * Instead, just store the desired value. If needed, the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
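
/* Both attributes land in the per-controller directory backing
 * hdev->debugfs (typically /sys/kernel/debug/bluetooth/hciN when
 * debugfs is mounted in the default location).
 */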

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}
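
/* hdev->commands[] mirrors the bitmask returned by Read Local Supported
 * Commands: byte n bit b here corresponds to octet n bit b of the spec
 * table, so the commands[14] & 0x20 test above checks the bit the spec
 * assigns to the Read Local Supported Features command. (Mapping noted
 * here for reference.)
 */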

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
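        /* (0x7d00 = 32000 slots of 0.625 ms each, i.e. 20000 ms; unit
         * interpretation per the HCI spec, noted here for clarity.)
         */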
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
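        /* (Byte n / bit b of this array enables mask bit 8 * n + b of
         * the Set Event Mask command; e.g. events[4] bit 0, mask bit 32,
         * is the Flow Specification Complete event. Mapping noted here
         * for reference, per the Core spec event mask table.)
         */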

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However, some controllers report
                 * max_page as 0 until SSP has been enabled. To get
                 * proper debugging output, force max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

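/* Build the default link policy word from the local LMP features; the
 * policy maps one bit per mode (role switch, hold, sniff, park) as
 * defined for the HCI Write Default Link Policy Settings command.
 */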
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If it is not supported, assume that
         * the controller does not actually support stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * a configured state. When the controller is marked as
         * unconfigured, this initialization procedure is not run.
         *
         * This means a controller may run through its setup phase and
         * then discover missing settings. In that case this function is
         * not called; it will only be called later, during the config
         * phase.
         *
         * So create the debugfs entries and register the SMP channels
         * only when in the setup phase or config phase.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

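/* The helpers below build single-command requests for the legacy HCI
 * ioctl paths; each is run synchronously through hci_req_sync().
 */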
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
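
/* Typical usage (illustrative sketch; the caller must balance the hold
 * with hci_dev_put()):
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *      if (hdev) {
 *              ...
 *              hci_dev_put(hdev);
 *      }
 */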

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

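/* The discovery cache keeps every result on ->all, and additionally
 * threads entries through ->unknown (remote name not yet known) and
 * ->resolve (queued for name resolution); each lookup helper below
 * walks one of those lists.
 */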
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

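/* Keep the resolve list ordered so that entries with the strongest
 * signal (smallest RSSI magnitude) are name-resolved first; entries
 * already in NAME_PENDING state stay ahead of the insertion point.
 */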
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

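        /* ir.length is expressed in 1.28 s inquiry units at the
         * controller; 2000 ms per unit is used here to leave headroom
         * for the whole request to complete (unit interpretation per
         * the HCI spec, noted for clarity).
         */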
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary
         * buffer and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
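
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A minimal sketch (illustrative only; error handling
 * omitted, LAP set to the General Inquiry Access Code 0x9e8b33):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info ii[255];
 *      } buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 0,
 *                        .lap = { 0x33, 0x8b, 0x9e } } };
 *      int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *      ioctl(sk, HCIINQUIRY, &buf);
 */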

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_PRIMARY) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
1451         *
1452         * When this function is called for a user channel, the flag
1453         * HCI_USER_CHANNEL will be set first before attempting to
1454         * open the device.
1455         */
1456        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1457            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1458                err = -EOPNOTSUPP;
1459                goto done;
1460        }
1461
1462        /* We need to ensure that no other power on/off work is pending
1463         * before proceeding to call hci_dev_do_open. This is
1464         * particularly important if the setup procedure has not yet
1465         * completed.
1466         */
1467        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1468                cancel_delayed_work(&hdev->power_off);
1469
1470        /* After this call it is guaranteed that the setup procedure
1471         * has finished. This means that error conditions like RFKILL
1472         * or no valid public or static random address apply.
1473         */
1474        flush_workqueue(hdev->req_workqueue);
1475
1476        /* For controllers not using the management interface and that
1477         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1478         * so that pairing works for them. Once the management interface
1479         * is in use this bit will be cleared again and userspace has
1480         * to explicitly enable it.
1481         */
1482        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1483            !hci_dev_test_flag(hdev, HCI_MGMT))
1484                hci_dev_set_flag(hdev, HCI_BONDABLE);
1485
1486        err = hci_dev_do_open(hdev);
1487
1488done:
1489        hci_dev_put(hdev);
1490        return err;
1491}
1492
1493/* This function requires the caller holds hdev->lock */
1494static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1495{
1496        struct hci_conn_params *p;
1497
1498        list_for_each_entry(p, &hdev->le_conn_params, list) {
1499                if (p->conn) {
1500                        hci_conn_drop(p->conn);
1501                        hci_conn_put(p->conn);
1502                        p->conn = NULL;
1503                }
1504                list_del_init(&p->action);
1505        }
1506
1507        BT_DBG("All LE pending actions cleared");
1508}
1509
1510int hci_dev_do_close(struct hci_dev *hdev)
1511{
1512        bool auto_off;
1513
1514        BT_DBG("%s %p", hdev->name, hdev);
1515
1516        if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1517            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1518            test_bit(HCI_UP, &hdev->flags)) {
1519                /* Execute the vendor-specific shutdown routine */
1520                if (hdev->shutdown)
1521                        hdev->shutdown(hdev);
1522        }
1523
1524        cancel_delayed_work(&hdev->power_off);
1525
1526        hci_request_cancel_all(hdev);
1527        hci_req_sync_lock(hdev);
1528
1529        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1530                cancel_delayed_work_sync(&hdev->cmd_timer);
1531                hci_req_sync_unlock(hdev);
1532                return 0;
1533        }
1534
1535        hci_leds_update_powered(hdev, false);
1536
1537        /* Flush the RX and TX work items */
1538        flush_work(&hdev->tx_work);
1539        flush_work(&hdev->rx_work);
1540
1541        if (hdev->discov_timeout > 0) {
1542                hdev->discov_timeout = 0;
1543                hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1544                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1545        }
1546
1547        if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1548                cancel_delayed_work(&hdev->service_cache);
1549
1550        if (hci_dev_test_flag(hdev, HCI_MGMT))
1551                cancel_delayed_work_sync(&hdev->rpa_expired);
1552
1553        /* Avoid potential lockdep warnings from the *_flush() calls by
1554         * ensuring the workqueue is empty up front.
1555         */
1556        drain_workqueue(hdev->workqueue);
1557
1558        hci_dev_lock(hdev);
1559
1560        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1561
1562        auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1563
1564        if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1565            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1566            hci_dev_test_flag(hdev, HCI_MGMT))
1567                __mgmt_power_off(hdev);
1568
1569        hci_inquiry_cache_flush(hdev);
1570        hci_pend_le_actions_clear(hdev);
1571        hci_conn_hash_flush(hdev);
1572        hci_dev_unlock(hdev);
1573
1574        smp_unregister(hdev);
1575
1576        hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1577
1578        if (hdev->flush)
1579                hdev->flush(hdev);
1580
1581        /* Reset device */
1582        skb_queue_purge(&hdev->cmd_q);
1583        atomic_set(&hdev->cmd_cnt, 1);
1584        if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1585            !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1586                set_bit(HCI_INIT, &hdev->flags);
1587                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1588                clear_bit(HCI_INIT, &hdev->flags);
1589        }
1590
1591        /* Flush the cmd work */
1592        flush_work(&hdev->cmd_work);
1593
1594        /* Drop queues */
1595        skb_queue_purge(&hdev->rx_q);
1596        skb_queue_purge(&hdev->cmd_q);
1597        skb_queue_purge(&hdev->raw_q);
1598
1599        /* Drop last sent command */
1600        if (hdev->sent_cmd) {
1601                cancel_delayed_work_sync(&hdev->cmd_timer);
1602                kfree_skb(hdev->sent_cmd);
1603                hdev->sent_cmd = NULL;
1604        }
1605
1606        clear_bit(HCI_RUNNING, &hdev->flags);
1607        hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1608
1609        /* After this point our queues are empty
1610         * and no tasks are scheduled. */
1611        hdev->close(hdev);
1612
1613        /* Clear flags */
1614        hdev->flags &= BIT(HCI_RAW);
1615        hci_dev_clear_volatile_flags(hdev);
1616
1617        /* Controller radio is available but is currently powered down */
1618        hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1619
1620        memset(hdev->eir, 0, sizeof(hdev->eir));
1621        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1622        bacpy(&hdev->random_addr, BDADDR_ANY);
1623
1624        hci_req_sync_unlock(hdev);
1625
1626        hci_dev_put(hdev);
1627        return 0;
1628}
1629
1630int hci_dev_close(__u16 dev)
1631{
1632        struct hci_dev *hdev;
1633        int err;
1634
1635        hdev = hci_dev_get(dev);
1636        if (!hdev)
1637                return -ENODEV;
1638
1639        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1640                err = -EBUSY;
1641                goto done;
1642        }
1643
1644        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1645                cancel_delayed_work(&hdev->power_off);
1646
1647        err = hci_dev_do_close(hdev);
1648
1649done:
1650        hci_dev_put(hdev);
1651        return err;
1652}
1653
1654static int hci_dev_do_reset(struct hci_dev *hdev)
1655{
1656        int ret;
1657
1658        BT_DBG("%s %p", hdev->name, hdev);
1659
1660        hci_req_sync_lock(hdev);
1661
1662        /* Drop queues */
1663        skb_queue_purge(&hdev->rx_q);
1664        skb_queue_purge(&hdev->cmd_q);
1665
1666        /* Avoid potential lockdep warnings from the *_flush() calls by
1667         * ensuring the workqueue is empty up front.
1668         */
1669        drain_workqueue(hdev->workqueue);
1670
1671        hci_dev_lock(hdev);
1672        hci_inquiry_cache_flush(hdev);
1673        hci_conn_hash_flush(hdev);
1674        hci_dev_unlock(hdev);
1675
1676        if (hdev->flush)
1677                hdev->flush(hdev);
1678
1679        atomic_set(&hdev->cmd_cnt, 1);
1680        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1681
1682        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1683
1684        hci_req_sync_unlock(hdev);
1685        return ret;
1686}
1687
1688int hci_dev_reset(__u16 dev)
1689{
1690        struct hci_dev *hdev;
1691        int err;
1692
1693        hdev = hci_dev_get(dev);
1694        if (!hdev)
1695                return -ENODEV;
1696
1697        if (!test_bit(HCI_UP, &hdev->flags)) {
1698                err = -ENETDOWN;
1699                goto done;
1700        }
1701
1702        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1703                err = -EBUSY;
1704                goto done;
1705        }
1706
1707        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1708                err = -EOPNOTSUPP;
1709                goto done;
1710        }
1711
1712        err = hci_dev_do_reset(hdev);
1713
1714done:
1715        hci_dev_put(hdev);
1716        return err;
1717}
1718
1719int hci_dev_reset_stat(__u16 dev)
1720{
1721        struct hci_dev *hdev;
1722        int ret = 0;
1723
1724        hdev = hci_dev_get(dev);
1725        if (!hdev)
1726                return -ENODEV;
1727
1728        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1729                ret = -EBUSY;
1730                goto done;
1731        }
1732
1733        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1734                ret = -EOPNOTSUPP;
1735                goto done;
1736        }
1737
1738        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1739
1740done:
1741        hci_dev_put(hdev);
1742        return ret;
1743}
1744
1745static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1746{
1747        bool conn_changed, discov_changed;
1748
1749        BT_DBG("%s scan 0x%02x", hdev->name, scan);
1750
1751        if ((scan & SCAN_PAGE))
1752                conn_changed = !hci_dev_test_and_set_flag(hdev,
1753                                                          HCI_CONNECTABLE);
1754        else
1755                conn_changed = hci_dev_test_and_clear_flag(hdev,
1756                                                           HCI_CONNECTABLE);
1757
1758        if ((scan & SCAN_INQUIRY)) {
1759                discov_changed = !hci_dev_test_and_set_flag(hdev,
1760                                                            HCI_DISCOVERABLE);
1761        } else {
1762                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1763                discov_changed = hci_dev_test_and_clear_flag(hdev,
1764                                                             HCI_DISCOVERABLE);
1765        }
1766
1767        if (!hci_dev_test_flag(hdev, HCI_MGMT))
1768                return;
1769
1770        if (conn_changed || discov_changed) {
1771                /* In case this was disabled through mgmt */
1772                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1773
1774                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1775                        hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1776
1777                mgmt_new_settings(hdev);
1778        }
1779}
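/* Editor's note: for reference, the "scan" argument above is the Write
 * Scan Enable value from the Bluetooth core specification, assuming the
 * usual hci.h definitions:
 *
 *   0x00  SCAN_DISABLED             no scans enabled
 *   0x01  SCAN_INQUIRY              inquiry scan only -> discoverable
 *   0x02  SCAN_PAGE                 page scan only    -> connectable
 *   0x03  SCAN_PAGE | SCAN_INQUIRY  connectable and discoverable
 */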
1780
1781int hci_dev_cmd(unsigned int cmd, void __user *arg)
1782{
1783        struct hci_dev *hdev;
1784        struct hci_dev_req dr;
1785        int err = 0;
1786
1787        if (copy_from_user(&dr, arg, sizeof(dr)))
1788                return -EFAULT;
1789
1790        hdev = hci_dev_get(dr.dev_id);
1791        if (!hdev)
1792                return -ENODEV;
1793
1794        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1795                err = -EBUSY;
1796                goto done;
1797        }
1798
1799        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1800                err = -EOPNOTSUPP;
1801                goto done;
1802        }
1803
1804        if (hdev->dev_type != HCI_PRIMARY) {
1805                err = -EOPNOTSUPP;
1806                goto done;
1807        }
1808
1809        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1810                err = -EOPNOTSUPP;
1811                goto done;
1812        }
1813
1814        switch (cmd) {
1815        case HCISETAUTH:
1816                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1817                                   HCI_INIT_TIMEOUT, NULL);
1818                break;
1819
1820        case HCISETENCRYPT:
1821                if (!lmp_encrypt_capable(hdev)) {
1822                        err = -EOPNOTSUPP;
1823                        break;
1824                }
1825
1826                if (!test_bit(HCI_AUTH, &hdev->flags)) {
1827                        /* Auth must be enabled first */
1828                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1829                                           HCI_INIT_TIMEOUT, NULL);
1830                        if (err)
1831                                break;
1832                }
1833
1834                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1835                                   HCI_INIT_TIMEOUT, NULL);
1836                break;
1837
1838        case HCISETSCAN:
1839                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1840                                   HCI_INIT_TIMEOUT, NULL);
1841
1842                /* Ensure that the connectable and discoverable states
1843                 * get correctly modified as this was a non-mgmt change.
1844                 */
1845                if (!err)
1846                        hci_update_scan_state(hdev, dr.dev_opt);
1847                break;
1848
1849        case HCISETLINKPOL:
1850                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1851                                   HCI_INIT_TIMEOUT, NULL);
1852                break;
1853
1854        case HCISETLINKMODE:
1855                hdev->link_mode = ((__u16) dr.dev_opt) &
1856                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
1857                break;
1858
1859        case HCISETPTYPE:
1860                hdev->pkt_type = (__u16) dr.dev_opt;
1861                break;
1862
1863        case HCISETACLMTU:
1864                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1865                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1866                break;
1867
1868        case HCISETSCOMTU:
1869                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1870                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1871                break;
1872
1873        default:
1874                err = -EINVAL;
1875                break;
1876        }
1877
1878done:
1879        hci_dev_put(hdev);
1880        return err;
1881}
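/* Editor's note: hedged sketch of how userspace would pack dev_opt for
 * the HCISETACLMTU case above. On a little-endian host the low 16 bits
 * of dev_opt carry the packet count and the high 16 bits the MTU,
 * matching the __u16 pointer arithmetic in the handler; set_acl_mtu()
 * is a hypothetical helper.
 */
#if 0 /* illustrative only */
static int set_acl_mtu(int sk, uint16_t dev_id, uint16_t mtu, uint16_t pkts)
{
	struct hci_dev_req dr;

	dr.dev_id  = dev_id;
	dr.dev_opt = ((uint32_t)mtu << 16) | pkts; /* pkts at +0, mtu at +1 */

	return ioctl(sk, HCISETACLMTU, &dr);
}
#endif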
1882
1883int hci_get_dev_list(void __user *arg)
1884{
1885        struct hci_dev *hdev;
1886        struct hci_dev_list_req *dl;
1887        struct hci_dev_req *dr;
1888        int n = 0, size, err;
1889        __u16 dev_num;
1890
1891        if (get_user(dev_num, (__u16 __user *) arg))
1892                return -EFAULT;
1893
1894        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1895                return -EINVAL;
1896
1897        size = sizeof(*dl) + dev_num * sizeof(*dr);
1898
1899        dl = kzalloc(size, GFP_KERNEL);
1900        if (!dl)
1901                return -ENOMEM;
1902
1903        dr = dl->dev_req;
1904
1905        read_lock(&hci_dev_list_lock);
1906        list_for_each_entry(hdev, &hci_dev_list, list) {
1907                unsigned long flags = hdev->flags;
1908
1909                /* When auto-off is configured the transport is still
1910                 * running, but the device should nevertheless be
1911                 * reported to userspace as down.
1912                 */
1913                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1914                        flags &= ~BIT(HCI_UP);
1915
1916                (dr + n)->dev_id  = hdev->id;
1917                (dr + n)->dev_opt = flags;
1918
1919                if (++n >= dev_num)
1920                        break;
1921        }
1922        read_unlock(&hci_dev_list_lock);
1923
1924        dl->dev_num = n;
1925        size = sizeof(*dl) + n * sizeof(*dr);
1926
1927        err = copy_to_user(arg, dl, size);
1928        kfree(dl);
1929
1930        return err ? -EFAULT : 0;
1931}
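/* Editor's note: hedged userspace sketch of the HCIGETDEVLIST ioctl the
 * helper above services; dev_num is set to the buffer capacity before
 * the call and rewritten with the number of entries actually returned.
 * Error handling is abbreviated for illustration.
 */
#if 0 /* illustrative only */
	struct hci_dev_list_req *dl;
	int i, sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (sk >= 0 && dl) {
		dl->dev_num = HCI_MAX_DEV;
		if (!ioctl(sk, HCIGETDEVLIST, dl))
			for (i = 0; i < dl->dev_num; i++)
				printf("hci%u flags 0x%x\n",
				       dl->dev_req[i].dev_id,
				       dl->dev_req[i].dev_opt);
	}
#endif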
1932
1933int hci_get_dev_info(void __user *arg)
1934{
1935        struct hci_dev *hdev;
1936        struct hci_dev_info di;
1937        unsigned long flags;
1938        int err = 0;
1939
1940        if (copy_from_user(&di, arg, sizeof(di)))
1941                return -EFAULT;
1942
1943        hdev = hci_dev_get(di.dev_id);
1944        if (!hdev)
1945                return -ENODEV;
1946
1947        /* When auto-off is configured the transport is still
1948         * running, but the device should nevertheless be
1949         * reported to userspace as down.
1950         */
1951        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1952                flags = hdev->flags & ~BIT(HCI_UP);
1953        else
1954                flags = hdev->flags;
1955
1956        strcpy(di.name, hdev->name);
1957        di.bdaddr   = hdev->bdaddr;
1958        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1959        di.flags    = flags;
1960        di.pkt_type = hdev->pkt_type;
1961        if (lmp_bredr_capable(hdev)) {
1962                di.acl_mtu  = hdev->acl_mtu;
1963                di.acl_pkts = hdev->acl_pkts;
1964                di.sco_mtu  = hdev->sco_mtu;
1965                di.sco_pkts = hdev->sco_pkts;
1966        } else {
1967                di.acl_mtu  = hdev->le_mtu;
1968                di.acl_pkts = hdev->le_pkts;
1969                di.sco_mtu  = 0;
1970                di.sco_pkts = 0;
1971        }
1972        di.link_policy = hdev->link_policy;
1973        di.link_mode   = hdev->link_mode;
1974
1975        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1976        memcpy(&di.features, &hdev->features, sizeof(di.features));
1977
1978        if (copy_to_user(arg, &di, sizeof(di)))
1979                err = -EFAULT;
1980
1981        hci_dev_put(hdev);
1982
1983        return err;
1984}
1985
1986/* ---- Interface to HCI drivers ---- */
1987
1988static int hci_rfkill_set_block(void *data, bool blocked)
1989{
1990        struct hci_dev *hdev = data;
1991
1992        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1993
1994        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1995                return -EBUSY;
1996
1997        if (blocked) {
1998                hci_dev_set_flag(hdev, HCI_RFKILLED);
1999                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2000                    !hci_dev_test_flag(hdev, HCI_CONFIG))
2001                        hci_dev_do_close(hdev);
2002        } else {
2003                hci_dev_clear_flag(hdev, HCI_RFKILLED);
2004        }
2005
2006        return 0;
2007}
2008
2009static const struct rfkill_ops hci_rfkill_ops = {
2010        .set_block = hci_rfkill_set_block,
2011};
2012
2013static void hci_power_on(struct work_struct *work)
2014{
2015        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2016        int err;
2017
2018        BT_DBG("%s", hdev->name);
2019
2020        if (test_bit(HCI_UP, &hdev->flags) &&
2021            hci_dev_test_flag(hdev, HCI_MGMT) &&
2022            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2023                cancel_delayed_work(&hdev->power_off);
2024                hci_req_sync_lock(hdev);
2025                err = __hci_req_hci_power_on(hdev);
2026                hci_req_sync_unlock(hdev);
2027                mgmt_power_on(hdev, err);
2028                return;
2029        }
2030
2031        err = hci_dev_do_open(hdev);
2032        if (err < 0) {
2033                hci_dev_lock(hdev);
2034                mgmt_set_powered_failed(hdev, err);
2035                hci_dev_unlock(hdev);
2036                return;
2037        }
2038
2039        /* During the HCI setup phase, a few error conditions are
2040         * ignored and they need to be checked now. If they are still
2041         * valid, it is important to turn the device back off.
2042         */
2043        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2044            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2045            (hdev->dev_type == HCI_PRIMARY &&
2046             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2047             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2048                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2049                hci_dev_do_close(hdev);
2050        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2051                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2052                                   HCI_AUTO_OFF_TIMEOUT);
2053        }
2054
2055        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2056                /* For unconfigured devices, set the HCI_RAW flag
2057                 * so that userspace can easily identify them.
2058                 */
2059                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2060                        set_bit(HCI_RAW, &hdev->flags);
2061
2062                /* For fully configured devices, this will send
2063                 * the Index Added event. For unconfigured devices,
2064                 * it will send an Unconfigured Index Added event.
2065                 *
2066                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2067                 * and no event will be sent.
2068                 */
2069                mgmt_index_added(hdev);
2070        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2071                /* Now that the controller is configured, it is
2072                 * important to clear the HCI_RAW flag.
2073                 */
2074                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2075                        clear_bit(HCI_RAW, &hdev->flags);
2076
2077                /* Powering on the controller with HCI_CONFIG set only
2078                 * happens with the transition from unconfigured to
2079                 * configured. This will send the Index Added event.
2080                 */
2081                mgmt_index_added(hdev);
2082        }
2083}
2084
2085static void hci_power_off(struct work_struct *work)
2086{
2087        struct hci_dev *hdev = container_of(work, struct hci_dev,
2088                                            power_off.work);
2089
2090        BT_DBG("%s", hdev->name);
2091
2092        hci_dev_do_close(hdev);
2093}
2094
2095static void hci_error_reset(struct work_struct *work)
2096{
2097        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2098
2099        BT_DBG("%s", hdev->name);
2100
2101        if (hdev->hw_error)
2102                hdev->hw_error(hdev, hdev->hw_error_code);
2103        else
2104                BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2105                       hdev->hw_error_code);
2106
2107        if (hci_dev_do_close(hdev))
2108                return;
2109
2110        hci_dev_do_open(hdev);
2111}
2112
2113void hci_uuids_clear(struct hci_dev *hdev)
2114{
2115        struct bt_uuid *uuid, *tmp;
2116
2117        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2118                list_del(&uuid->list);
2119                kfree(uuid);
2120        }
2121}
2122
2123void hci_link_keys_clear(struct hci_dev *hdev)
2124{
2125        struct link_key *key;
2126
2127        list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2128                list_del_rcu(&key->list);
2129                kfree_rcu(key, rcu);
2130        }
2131}
2132
2133void hci_smp_ltks_clear(struct hci_dev *hdev)
2134{
2135        struct smp_ltk *k;
2136
2137        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2138                list_del_rcu(&k->list);
2139                kfree_rcu(k, rcu);
2140        }
2141}
2142
2143void hci_smp_irks_clear(struct hci_dev *hdev)
2144{
2145        struct smp_irk *k;
2146
2147        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2148                list_del_rcu(&k->list);
2149                kfree_rcu(k, rcu);
2150        }
2151}
2152
2153struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2154{
2155        struct link_key *k;
2156
2157        rcu_read_lock();
2158        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2159                if (bacmp(bdaddr, &k->bdaddr) == 0) {
2160                        rcu_read_unlock();
2161                        return k;
2162                }
2163        }
2164        rcu_read_unlock();
2165
2166        return NULL;
2167}
2168
2169static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2170                               u8 key_type, u8 old_key_type)
2171{
2172        /* Legacy key */
2173        if (key_type < 0x03)
2174                return true;
2175
2176        /* Debug keys are insecure so don't store them persistently */
2177        if (key_type == HCI_LK_DEBUG_COMBINATION)
2178                return false;
2179
2180        /* Changed combination key and there's no previous one */
2181        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2182                return false;
2183
2184        /* Security mode 3 case */
2185        if (!conn)
2186                return true;
2187
2188        /* BR/EDR key derived using SC from an LE link */
2189        if (conn->type == LE_LINK)
2190                return true;
2191
2192        /* Both the local and the remote side requested some form of bonding */
2193        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2194                return true;
2195
2196        /* Local side had dedicated bonding as requirement */
2197        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2198                return true;
2199
2200        /* Remote side had dedicated bonding as requirement */
2201        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2202                return true;
2203
2204        /* If none of the above criteria match, then don't store the key
2205         * persistently */
2206        return false;
2207}
2208
2209static u8 ltk_role(u8 type)
2210{
2211        if (type == SMP_LTK)
2212                return HCI_ROLE_MASTER;
2213
2214        return HCI_ROLE_SLAVE;
2215}
2216
2217struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2218                             u8 addr_type, u8 role)
2219{
2220        struct smp_ltk *k;
2221
2222        rcu_read_lock();
2223        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2224                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2225                        continue;
2226
2227                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2228                        rcu_read_unlock();
2229                        return k;
2230                }
2231        }
2232        rcu_read_unlock();
2233
2234        return NULL;
2235}
2236
2237struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2238{
2239        struct smp_irk *irk;
2240
2241        rcu_read_lock();
2242        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2243                if (!bacmp(&irk->rpa, rpa)) {
2244                        rcu_read_unlock();
2245                        return irk;
2246                }
2247        }
2248
2249        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2250                if (smp_irk_matches(hdev, irk->val, rpa)) {
2251                        bacpy(&irk->rpa, rpa);
2252                        rcu_read_unlock();
2253                        return irk;
2254                }
2255        }
2256        rcu_read_unlock();
2257
2258        return NULL;
2259}
2260
2261struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2262                                     u8 addr_type)
2263{
2264        struct smp_irk *irk;
2265
2266        /* Identity Address must be public or static random */
2267        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2268                return NULL;
2269
2270        rcu_read_lock();
2271        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2272                if (addr_type == irk->addr_type &&
2273                    bacmp(bdaddr, &irk->bdaddr) == 0) {
2274                        rcu_read_unlock();
2275                        return irk;
2276                }
2277        }
2278        rcu_read_unlock();
2279
2280        return NULL;
2281}
2282
2283struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2284                                  bdaddr_t *bdaddr, u8 *val, u8 type,
2285                                  u8 pin_len, bool *persistent)
2286{
2287        struct link_key *key, *old_key;
2288        u8 old_key_type;
2289
2290        old_key = hci_find_link_key(hdev, bdaddr);
2291        if (old_key) {
2292                old_key_type = old_key->type;
2293                key = old_key;
2294        } else {
2295                old_key_type = conn ? conn->key_type : 0xff;
2296                key = kzalloc(sizeof(*key), GFP_KERNEL);
2297                if (!key)
2298                        return NULL;
2299                list_add_rcu(&key->list, &hdev->link_keys);
2300        }
2301
2302        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2303
2304        /* Some buggy controller combinations generate a changed
2305         * combination key for legacy pairing even when there's no
2306         * previous key */
2307        if (type == HCI_LK_CHANGED_COMBINATION &&
2308            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2309                type = HCI_LK_COMBINATION;
2310                if (conn)
2311                        conn->key_type = type;
2312        }
2313
2314        bacpy(&key->bdaddr, bdaddr);
2315        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2316        key->pin_len = pin_len;
2317
2318        if (type == HCI_LK_CHANGED_COMBINATION)
2319                key->type = old_key_type;
2320        else
2321                key->type = type;
2322
2323        if (persistent)
2324                *persistent = hci_persistent_key(hdev, conn, type,
2325                                                 old_key_type);
2326
2327        return key;
2328}
2329
2330struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2331                            u8 addr_type, u8 type, u8 authenticated,
2332                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2333{
2334        struct smp_ltk *key, *old_key;
2335        u8 role = ltk_role(type);
2336
2337        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2338        if (old_key)
2339                key = old_key;
2340        else {
2341                key = kzalloc(sizeof(*key), GFP_KERNEL);
2342                if (!key)
2343                        return NULL;
2344                list_add_rcu(&key->list, &hdev->long_term_keys);
2345        }
2346
2347        bacpy(&key->bdaddr, bdaddr);
2348        key->bdaddr_type = addr_type;
2349        memcpy(key->val, tk, sizeof(key->val));
2350        key->authenticated = authenticated;
2351        key->ediv = ediv;
2352        key->rand = rand;
2353        key->enc_size = enc_size;
2354        key->type = type;
2355
2356        return key;
2357}
2358
2359struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2360                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
2361{
2362        struct smp_irk *irk;
2363
2364        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2365        if (!irk) {
2366                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2367                if (!irk)
2368                        return NULL;
2369
2370                bacpy(&irk->bdaddr, bdaddr);
2371                irk->addr_type = addr_type;
2372
2373                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2374        }
2375
2376        memcpy(irk->val, val, 16);
2377        bacpy(&irk->rpa, rpa);
2378
2379        return irk;
2380}
2381
2382int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2383{
2384        struct link_key *key;
2385
2386        key = hci_find_link_key(hdev, bdaddr);
2387        if (!key)
2388                return -ENOENT;
2389
2390        BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2391
2392        list_del_rcu(&key->list);
2393        kfree_rcu(key, rcu);
2394
2395        return 0;
2396}
2397
2398int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2399{
2400        struct smp_ltk *k;
2401        int removed = 0;
2402
2403        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2404                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2405                        continue;
2406
2407                BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2408
2409                list_del_rcu(&k->list);
2410                kfree_rcu(k, rcu);
2411                removed++;
2412        }
2413
2414        return removed ? 0 : -ENOENT;
2415}
2416
2417void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2418{
2419        struct smp_irk *k;
2420
2421        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2422                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2423                        continue;
2424
2425                BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2426
2427                list_del_rcu(&k->list);
2428                kfree_rcu(k, rcu);
2429        }
2430}
2431
2432bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2433{
2434        struct smp_ltk *k;
2435        struct smp_irk *irk;
2436        u8 addr_type;
2437
2438        if (type == BDADDR_BREDR) {
2439                if (hci_find_link_key(hdev, bdaddr))
2440                        return true;
2441                return false;
2442        }
2443
2444        /* Convert to HCI addr type which struct smp_ltk uses */
2445        if (type == BDADDR_LE_PUBLIC)
2446                addr_type = ADDR_LE_DEV_PUBLIC;
2447        else
2448                addr_type = ADDR_LE_DEV_RANDOM;
2449
2450        irk = hci_get_irk(hdev, bdaddr, addr_type);
2451        if (irk) {
2452                bdaddr = &irk->bdaddr;
2453                addr_type = irk->addr_type;
2454        }
2455
2456        rcu_read_lock();
2457        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2458                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2459                        rcu_read_unlock();
2460                        return true;
2461                }
2462        }
2463        rcu_read_unlock();
2464
2465        return false;
2466}
2467
2468/* HCI command timer function */
2469static void hci_cmd_timeout(struct work_struct *work)
2470{
2471        struct hci_dev *hdev = container_of(work, struct hci_dev,
2472                                            cmd_timer.work);
2473
2474        if (hdev->sent_cmd) {
2475                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2476                u16 opcode = __le16_to_cpu(sent->opcode);
2477
2478                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2479        } else {
2480                BT_ERR("%s command tx timeout", hdev->name);
2481        }
2482
2483        atomic_set(&hdev->cmd_cnt, 1);
2484        queue_work(hdev->workqueue, &hdev->cmd_work);
2485}
2486
2487struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2488                                          bdaddr_t *bdaddr, u8 bdaddr_type)
2489{
2490        struct oob_data *data;
2491
2492        list_for_each_entry(data, &hdev->remote_oob_data, list) {
2493                if (bacmp(bdaddr, &data->bdaddr) != 0)
2494                        continue;
2495                if (data->bdaddr_type != bdaddr_type)
2496                        continue;
2497                return data;
2498        }
2499
2500        return NULL;
2501}
2502
2503int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504                               u8 bdaddr_type)
2505{
2506        struct oob_data *data;
2507
2508        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2509        if (!data)
2510                return -ENOENT;
2511
2512        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2513
2514        list_del(&data->list);
2515        kfree(data);
2516
2517        return 0;
2518}
2519
2520void hci_remote_oob_data_clear(struct hci_dev *hdev)
2521{
2522        struct oob_data *data, *n;
2523
2524        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2525                list_del(&data->list);
2526                kfree(data);
2527        }
2528}
2529
2530int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2531                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
2532                            u8 *hash256, u8 *rand256)
2533{
2534        struct oob_data *data;
2535
2536        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2537        if (!data) {
2538                data = kmalloc(sizeof(*data), GFP_KERNEL);
2539                if (!data)
2540                        return -ENOMEM;
2541
2542                bacpy(&data->bdaddr, bdaddr);
2543                data->bdaddr_type = bdaddr_type;
2544                list_add(&data->list, &hdev->remote_oob_data);
2545        }
2546
2547        if (hash192 && rand192) {
2548                memcpy(data->hash192, hash192, sizeof(data->hash192));
2549                memcpy(data->rand192, rand192, sizeof(data->rand192));
2550                if (hash256 && rand256)
2551                        data->present = 0x03;
2552        } else {
2553                memset(data->hash192, 0, sizeof(data->hash192));
2554                memset(data->rand192, 0, sizeof(data->rand192));
2555                if (hash256 && rand256)
2556                        data->present = 0x02;
2557                else
2558                        data->present = 0x00;
2559        }
2560
2561        if (hash256 && rand256) {
2562                memcpy(data->hash256, hash256, sizeof(data->hash256));
2563                memcpy(data->rand256, rand256, sizeof(data->rand256));
2564        } else {
2565                memset(data->hash256, 0, sizeof(data->hash256));
2566                memset(data->rand256, 0, sizeof(data->rand256));
2567                if (hash192 && rand192)
2568                        data->present = 0x01;
2569        }
2570
2571        BT_DBG("%s for %pMR", hdev->name, bdaddr);
2572
2573        return 0;
2574}
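/* Editor's note: summary of the data->present encoding built above:
 *   0x00  no OOB values valid
 *   0x01  P-192 values only (hash192/rand192)
 *   0x02  P-256 values only (hash256/rand256)
 *   0x03  both P-192 and P-256 values
 */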
2575
2576/* This function requires the caller holds hdev->lock */
2577struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2578{
2579        struct adv_info *adv_instance;
2580
2581        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2582                if (adv_instance->instance == instance)
2583                        return adv_instance;
2584        }
2585
2586        return NULL;
2587}
2588
2589/* This function requires the caller holds hdev->lock */
2590struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2591{
2592        struct adv_info *cur_instance;
2593
2594        cur_instance = hci_find_adv_instance(hdev, instance);
2595        if (!cur_instance)
2596                return NULL;
2597
2598        if (cur_instance == list_last_entry(&hdev->adv_instances,
2599                                            struct adv_info, list))
2600                return list_first_entry(&hdev->adv_instances,
2601                                        struct adv_info, list);
2602        else
2603                return list_next_entry(cur_instance, list);
2604}
2605
2606/* This function requires the caller holds hdev->lock */
2607int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2608{
2609        struct adv_info *adv_instance;
2610
2611        adv_instance = hci_find_adv_instance(hdev, instance);
2612        if (!adv_instance)
2613                return -ENOENT;
2614
2615        BT_DBG("%s removing %dMR", hdev->name, instance);
2616
2617        if (hdev->cur_adv_instance == instance) {
2618                if (hdev->adv_instance_timeout) {
2619                        cancel_delayed_work(&hdev->adv_instance_expire);
2620                        hdev->adv_instance_timeout = 0;
2621                }
2622                hdev->cur_adv_instance = 0x00;
2623        }
2624
2625        list_del(&adv_instance->list);
2626        kfree(adv_instance);
2627
2628        hdev->adv_instance_cnt--;
2629
2630        return 0;
2631}
2632
2633/* This function requires the caller holds hdev->lock */
2634void hci_adv_instances_clear(struct hci_dev *hdev)
2635{
2636        struct adv_info *adv_instance, *n;
2637
2638        if (hdev->adv_instance_timeout) {
2639                cancel_delayed_work(&hdev->adv_instance_expire);
2640                hdev->adv_instance_timeout = 0;
2641        }
2642
2643        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2644                list_del(&adv_instance->list);
2645                kfree(adv_instance);
2646        }
2647
2648        hdev->adv_instance_cnt = 0;
2649        hdev->cur_adv_instance = 0x00;
2650}
2651
2652/* This function requires the caller holds hdev->lock */
2653int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2654                         u16 adv_data_len, u8 *adv_data,
2655                         u16 scan_rsp_len, u8 *scan_rsp_data,
2656                         u16 timeout, u16 duration)
2657{
2658        struct adv_info *adv_instance;
2659
2660        adv_instance = hci_find_adv_instance(hdev, instance);
2661        if (adv_instance) {
2662                memset(adv_instance->adv_data, 0,
2663                       sizeof(adv_instance->adv_data));
2664                memset(adv_instance->scan_rsp_data, 0,
2665                       sizeof(adv_instance->scan_rsp_data));
2666        } else {
2667                if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2668                    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2669                        return -EOVERFLOW;
2670
2671                adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2672                if (!adv_instance)
2673                        return -ENOMEM;
2674
2675                adv_instance->pending = true;
2676                adv_instance->instance = instance;
2677                list_add(&adv_instance->list, &hdev->adv_instances);
2678                hdev->adv_instance_cnt++;
2679        }
2680
2681        adv_instance->flags = flags;
2682        adv_instance->adv_data_len = adv_data_len;
2683        adv_instance->scan_rsp_len = scan_rsp_len;
2684
2685        if (adv_data_len)
2686                memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2687
2688        if (scan_rsp_len)
2689                memcpy(adv_instance->scan_rsp_data,
2690                       scan_rsp_data, scan_rsp_len);
2691
2692        adv_instance->timeout = timeout;
2693        adv_instance->remaining_time = timeout;
2694
2695        if (duration == 0)
2696                adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2697        else
2698                adv_instance->duration = duration;
2699
2700        BT_DBG("%s for %dMR", hdev->name, instance);
2701
2702        return 0;
2703}
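/* Editor's note: hedged sketch of registering a single advertising
 * instance with the helper above; the flag value and payload are made
 * up for illustration, and hdev->lock must be held as the comment on
 * the function requires.
 */
#if 0 /* illustrative only */
	u8 adv[] = { 0x02, 0x01, 0x06 }; /* AD: Flags, LE General Discoverable */
	int err;

	hci_dev_lock(hdev);
	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv), adv,
				   0, NULL, 0, 0);
	hci_dev_unlock(hdev);
#endif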
2704
2705struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2706                                         bdaddr_t *bdaddr, u8 type)
2707{
2708        struct bdaddr_list *b;
2709
2710        list_for_each_entry(b, bdaddr_list, list) {
2711                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2712                        return b;
2713        }
2714
2715        return NULL;
2716}
2717
2718void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2719{
2720        struct bdaddr_list *b, *n;
2721
2722        list_for_each_entry_safe(b, n, bdaddr_list, list) {
2723                list_del(&b->list);
2724                kfree(b);
2725        }
2726}
2727
2728int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2729{
2730        struct bdaddr_list *entry;
2731
2732        if (!bacmp(bdaddr, BDADDR_ANY))
2733                return -EBADF;
2734
2735        if (hci_bdaddr_list_lookup(list, bdaddr, type))
2736                return -EEXIST;
2737
2738        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2739        if (!entry)
2740                return -ENOMEM;
2741
2742        bacpy(&entry->bdaddr, bdaddr);
2743        entry->bdaddr_type = type;
2744
2745        list_add(&entry->list, list);
2746
2747        return 0;
2748}
2749
2750int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2751{
2752        struct bdaddr_list *entry;
2753
2754        if (!bacmp(bdaddr, BDADDR_ANY)) {
2755                hci_bdaddr_list_clear(list);
2756                return 0;
2757        }
2758
2759        entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2760        if (!entry)
2761                return -ENOENT;
2762
2763        list_del(&entry->list);
2764        kfree(entry);
2765
2766        return 0;
2767}
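/* Editor's note: one asymmetry worth flagging in the two helpers above:
 * hci_bdaddr_list_add() rejects BDADDR_ANY with -EBADF, while
 * hci_bdaddr_list_del() treats BDADDR_ANY as "clear the whole list"
 * and returns 0.
 */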
2768
2769/* This function requires the caller holds hdev->lock */
2770struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2771                                               bdaddr_t *addr, u8 addr_type)
2772{
2773        struct hci_conn_params *params;
2774
2775        list_for_each_entry(params, &hdev->le_conn_params, list) {
2776                if (bacmp(&params->addr, addr) == 0 &&
2777                    params->addr_type == addr_type) {
2778                        return params;
2779                }
2780        }
2781
2782        return NULL;
2783}
2784
2785/* This function requires the caller holds hdev->lock */
2786struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2787                                                  bdaddr_t *addr, u8 addr_type)
2788{
2789        struct hci_conn_params *param;
2790
2791        list_for_each_entry(param, list, action) {
2792                if (bacmp(&param->addr, addr) == 0 &&
2793                    param->addr_type == addr_type)
2794                        return param;
2795        }
2796
2797        return NULL;
2798}
2799
2800/* This function requires the caller holds hdev->lock */
2801struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2802                                            bdaddr_t *addr, u8 addr_type)
2803{
2804        struct hci_conn_params *params;
2805
2806        params = hci_conn_params_lookup(hdev, addr, addr_type);
2807        if (params)
2808                return params;
2809
2810        params = kzalloc(sizeof(*params), GFP_KERNEL);
2811        if (!params) {
2812                BT_ERR("Out of memory");
2813                return NULL;
2814        }
2815
2816        bacpy(&params->addr, addr);
2817        params->addr_type = addr_type;
2818
2819        list_add(&params->list, &hdev->le_conn_params);
2820        INIT_LIST_HEAD(&params->action);
2821
2822        params->conn_min_interval = hdev->le_conn_min_interval;
2823        params->conn_max_interval = hdev->le_conn_max_interval;
2824        params->conn_latency = hdev->le_conn_latency;
2825        params->supervision_timeout = hdev->le_supv_timeout;
2826        params->auto_connect = HCI_AUTO_CONN_DISABLED;
2827
2828        BT_DBG("addr %pMR (type %u)", addr, addr_type);
2829
2830        return params;
2831}
2832
2833static void hci_conn_params_free(struct hci_conn_params *params)
2834{
2835        if (params->conn) {
2836                hci_conn_drop(params->conn);
2837                hci_conn_put(params->conn);
2838        }
2839
2840        list_del(&params->action);
2841        list_del(&params->list);
2842        kfree(params);
2843}
2844
2845/* This function requires the caller holds hdev->lock */
2846void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2847{
2848        struct hci_conn_params *params;
2849
2850        params = hci_conn_params_lookup(hdev, addr, addr_type);
2851        if (!params)
2852                return;
2853
2854        hci_conn_params_free(params);
2855
2856        hci_update_background_scan(hdev);
2857
2858        BT_DBG("addr %pMR (type %u)", addr, addr_type);
2859}
2860
2861/* This function requires the caller holds hdev->lock */
2862void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2863{
2864        struct hci_conn_params *params, *tmp;
2865
2866        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2867                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2868                        continue;
2869
2870                /* If trying to establish a one-time connection to a disabled
2871                 * device, leave the params, but mark them as just once.
2872                 */
2873                if (params->explicit_connect) {
2874                        params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2875                        continue;
2876                }
2877
2878                list_del(&params->list);
2879                kfree(params);
2880        }
2881
2882        BT_DBG("All LE disabled connection parameters were removed");
2883}
2884
2885/* This function requires the caller holds hdev->lock */
2886static void hci_conn_params_clear_all(struct hci_dev *hdev)
2887{
2888        struct hci_conn_params *params, *tmp;
2889
2890        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2891                hci_conn_params_free(params);
2892
2893        BT_DBG("All LE connection parameters were removed");
2894}
2895
2896/* Copy the Identity Address of the controller.
2897 *
2898 * If the controller has a public BD_ADDR, then by default use that one.
2899 * If this is an LE-only controller without a public address, default to
2900 * the static random address.
2901 *
2902 * For debugging purposes it is possible to force controllers with a
2903 * public address to use the static random address instead.
2904 *
2905 * In case BR/EDR has been disabled on a dual-mode controller and
2906 * userspace has configured a static address, then that address
2907 * becomes the identity address instead of the public BR/EDR address.
2908 */
2909void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2910                               u8 *bdaddr_type)
2911{
2912        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2913            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2914            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2915             bacmp(&hdev->static_addr, BDADDR_ANY))) {
2916                bacpy(bdaddr, &hdev->static_addr);
2917                *bdaddr_type = ADDR_LE_DEV_RANDOM;
2918        } else {
2919                bacpy(bdaddr, &hdev->bdaddr);
2920                *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2921        }
2922}
2923
2924/* Alloc HCI device */
2925struct hci_dev *hci_alloc_dev(void)
2926{
2927        struct hci_dev *hdev;
2928
2929        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2930        if (!hdev)
2931                return NULL;
2932
2933        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2934        hdev->esco_type = (ESCO_HV1);
2935        hdev->link_mode = (HCI_LM_ACCEPT);
2936        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2937        hdev->io_capability = 0x03;     /* No Input No Output */
2938        hdev->manufacturer = 0xffff;    /* Default to internal use */
2939        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2940        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2941        hdev->adv_instance_cnt = 0;
2942        hdev->cur_adv_instance = 0x00;
2943        hdev->adv_instance_timeout = 0;
2944
2945        hdev->sniff_max_interval = 800;
2946        hdev->sniff_min_interval = 80;
2947
2948        hdev->le_adv_channel_map = 0x07;
2949        hdev->le_adv_min_interval = 0x0800;
2950        hdev->le_adv_max_interval = 0x0800;
2951        hdev->le_scan_interval = 0x0060;
2952        hdev->le_scan_window = 0x0030;
2953        hdev->le_conn_min_interval = 0x0028;
2954        hdev->le_conn_max_interval = 0x0038;
2955        hdev->le_conn_latency = 0x0000;
2956        hdev->le_supv_timeout = 0x002a;
2957        hdev->le_def_tx_len = 0x001b;
2958        hdev->le_def_tx_time = 0x0148;
2959        hdev->le_max_tx_len = 0x001b;
2960        hdev->le_max_tx_time = 0x0148;
2961        hdev->le_max_rx_len = 0x001b;
2962        hdev->le_max_rx_time = 0x0148;
2963
2964        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2965        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2966        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2967        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2968
2969        mutex_init(&hdev->lock);
2970        mutex_init(&hdev->req_lock);
2971
2972        INIT_LIST_HEAD(&hdev->mgmt_pending);
2973        INIT_LIST_HEAD(&hdev->blacklist);
2974        INIT_LIST_HEAD(&hdev->whitelist);
2975        INIT_LIST_HEAD(&hdev->uuids);
2976        INIT_LIST_HEAD(&hdev->link_keys);
2977        INIT_LIST_HEAD(&hdev->long_term_keys);
2978        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2979        INIT_LIST_HEAD(&hdev->remote_oob_data);
2980        INIT_LIST_HEAD(&hdev->le_white_list);
2981        INIT_LIST_HEAD(&hdev->le_conn_params);
2982        INIT_LIST_HEAD(&hdev->pend_le_conns);
2983        INIT_LIST_HEAD(&hdev->pend_le_reports);
2984        INIT_LIST_HEAD(&hdev->conn_hash.list);
2985        INIT_LIST_HEAD(&hdev->adv_instances);
2986
2987        INIT_WORK(&hdev->rx_work, hci_rx_work);
2988        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2989        INIT_WORK(&hdev->tx_work, hci_tx_work);
2990        INIT_WORK(&hdev->power_on, hci_power_on);
2991        INIT_WORK(&hdev->error_reset, hci_error_reset);
2992
2993        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2994
2995        skb_queue_head_init(&hdev->rx_q);
2996        skb_queue_head_init(&hdev->cmd_q);
2997        skb_queue_head_init(&hdev->raw_q);
2998
2999        init_waitqueue_head(&hdev->req_wait_q);
3000
3001        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3002
3003        hci_request_setup(hdev);
3004
3005        hci_init_sysfs(hdev);
3006        discovery_init(hdev);
3007
3008        return hdev;
3009}
3010EXPORT_SYMBOL(hci_alloc_dev);
3011
3012/* Free HCI device */
3013void hci_free_dev(struct hci_dev *hdev)
3014{
3015        /* will free via device release */
3016        put_device(&hdev->dev);
3017}
3018EXPORT_SYMBOL(hci_free_dev);
3019
3020/* Register HCI device */
3021int hci_register_dev(struct hci_dev *hdev)
3022{
3023        int id, error;
3024
3025        if (!hdev->open || !hdev->close || !hdev->send)
3026                return -EINVAL;
3027
3028        /* Do not allow HCI_AMP devices to register at index 0,
3029         * so the index can be used as the AMP controller ID.
3030         */
3031        switch (hdev->dev_type) {
3032        case HCI_PRIMARY:
3033                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3034                break;
3035        case HCI_AMP:
3036                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3037                break;
3038        default:
3039                return -EINVAL;
3040        }
3041
3042        if (id < 0)
3043                return id;
3044
3045        sprintf(hdev->name, "hci%d", id);
3046        hdev->id = id;
3047
3048        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3049
3050        hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3051                                          WQ_MEM_RECLAIM, 1, hdev->name);
3052        if (!hdev->workqueue) {
3053                error = -ENOMEM;
3054                goto err;
3055        }
3056
3057        hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3058                                              WQ_MEM_RECLAIM, 1, hdev->name);
3059        if (!hdev->req_workqueue) {
3060                destroy_workqueue(hdev->workqueue);
3061                error = -ENOMEM;
3062                goto err;
3063        }
3064
3065        if (!IS_ERR_OR_NULL(bt_debugfs))
3066                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3067
3068        dev_set_name(&hdev->dev, "%s", hdev->name);
3069
3070        error = device_add(&hdev->dev);
3071        if (error < 0)
3072                goto err_wqueue;
3073
3074        hci_leds_init(hdev);
3075
3076        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3077                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3078                                    hdev);
3079        if (hdev->rfkill) {
3080                if (rfkill_register(hdev->rfkill) < 0) {
3081                        rfkill_destroy(hdev->rfkill);
3082                        hdev->rfkill = NULL;
3083                }
3084        }
3085
3086        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3087                hci_dev_set_flag(hdev, HCI_RFKILLED);
3088
3089        hci_dev_set_flag(hdev, HCI_SETUP);
3090        hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3091
3092        if (hdev->dev_type == HCI_PRIMARY) {
3093                /* Assume BR/EDR support until proven otherwise (such as
3094                 * through reading supported features during init).
3095                 */
3096                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3097        }
3098
3099        write_lock(&hci_dev_list_lock);
3100        list_add(&hdev->list, &hci_dev_list);
3101        write_unlock(&hci_dev_list_lock);
3102
3103        /* Devices that are marked for raw-only usage are unconfigured
3104         * and should not be included in normal operation.
3105         */
3106        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3107                hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3108
3109        hci_sock_dev_event(hdev, HCI_DEV_REG);
3110        hci_dev_hold(hdev);
3111
3112        queue_work(hdev->req_workqueue, &hdev->power_on);
3113
3114        return id;
3115
3116err_wqueue:
3117        destroy_workqueue(hdev->workqueue);
3118        destroy_workqueue(hdev->req_workqueue);
3119err:
3120        ida_simple_remove(&hci_index_ida, hdev->id);
3121
3122        return error;
3123}
3124EXPORT_SYMBOL(hci_register_dev);
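/* Editor's note: hedged sketch of the minimal driver-side use of the
 * registration API above, loosely modelled on the simplest HCI drivers;
 * my_open(), my_close() and my_send() are hypothetical callbacks with
 * the signatures struct hci_dev expects.
 */
#if 0 /* illustrative only */
static int my_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = my_open;		/* all three are mandatory */
	hdev->close = my_close;
	hdev->send  = my_send;

	err = hci_register_dev(hdev);	/* returns the new index or -errno */
	if (err < 0)
		hci_free_dev(hdev);

	return err < 0 ? err : 0;
}
#endif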
3125
3126/* Unregister HCI device */
3127void hci_unregister_dev(struct hci_dev *hdev)
3128{
3129        int id;
3130
3131        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3132
3133        hci_dev_set_flag(hdev, HCI_UNREGISTER);
3134
3135        id = hdev->id;
3136
3137        write_lock(&hci_dev_list_lock);
3138        list_del(&hdev->list);
3139        write_unlock(&hci_dev_list_lock);
3140
3141        cancel_work_sync(&hdev->power_on);
3142
3143        hci_dev_do_close(hdev);
3144
3145        if (!test_bit(HCI_INIT, &hdev->flags) &&
3146            !hci_dev_test_flag(hdev, HCI_SETUP) &&
3147            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3148                hci_dev_lock(hdev);
3149                mgmt_index_removed(hdev);
3150                hci_dev_unlock(hdev);
3151        }
3152
3153        /* mgmt_index_removed should take care of emptying the
3154         * pending list */
3155        BUG_ON(!list_empty(&hdev->mgmt_pending));
3156
3157        hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3158
3159        if (hdev->rfkill) {
3160                rfkill_unregister(hdev->rfkill);
3161                rfkill_destroy(hdev->rfkill);
3162        }
3163
3164        device_del(&hdev->dev);
3165
3166        debugfs_remove_recursive(hdev->debugfs);
3167        kfree_const(hdev->hw_info);
3168        kfree_const(hdev->fw_info);
3169
3170        destroy_workqueue(hdev->workqueue);
3171        destroy_workqueue(hdev->req_workqueue);
3172
3173        hci_dev_lock(hdev);
3174        hci_bdaddr_list_clear(&hdev->blacklist);
3175        hci_bdaddr_list_clear(&hdev->whitelist);
3176        hci_uuids_clear(hdev);
3177        hci_link_keys_clear(hdev);
3178        hci_smp_ltks_clear(hdev);
3179        hci_smp_irks_clear(hdev);
3180        hci_remote_oob_data_clear(hdev);
3181        hci_adv_instances_clear(hdev);
3182        hci_bdaddr_list_clear(&hdev->le_white_list);
3183        hci_conn_params_clear_all(hdev);
3184        hci_discovery_filter_clear(hdev);
3185        hci_dev_unlock(hdev);
3186
3187        hci_dev_put(hdev);
3188
3189        ida_simple_remove(&hci_index_ida, id);
3190}
3191EXPORT_SYMBOL(hci_unregister_dev);
3192
3193/* Suspend HCI device */
3194int hci_suspend_dev(struct hci_dev *hdev)
3195{
3196        hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3197        return 0;
3198}
3199EXPORT_SYMBOL(hci_suspend_dev);
3200
3201/* Resume HCI device */
3202int hci_resume_dev(struct hci_dev *hdev)
3203{
3204        hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3205        return 0;
3206}
3207EXPORT_SYMBOL(hci_resume_dev);
3208
3209/* Reset HCI device */
3210int hci_reset_dev(struct hci_dev *hdev)
3211{
3212        const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3213        struct sk_buff *skb;
3214
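            /* Synthesize an HCI Hardware Error event: event code 0x10
             * (HCI_EV_HARDWARE_ERROR), parameter length 0x01 and
             * hardware code 0x00, which the event processing code
             * treats as grounds for a full controller restart.
             */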
3215        skb = bt_skb_alloc(3, GFP_ATOMIC);
3216        if (!skb)
3217                return -ENOMEM;
3218
3219        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3220        memcpy(skb_put(skb, 3), hw_err, 3);
3221
3222        /* Send Hardware Error to upper stack */
3223        return hci_recv_frame(hdev, skb);
3224}
3225EXPORT_SYMBOL(hci_reset_dev);
3226
3227/* Receive frame from HCI drivers */
3228int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3229{
3230        if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3231                      !test_bit(HCI_INIT, &hdev->flags))) {
3232                kfree_skb(skb);
3233                return -ENXIO;
3234        }
3235
3236        if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3237            hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3238            hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3239                kfree_skb(skb);
3240                return -EINVAL;
3241        }
3242
3243        /* Incoming skb */
3244        bt_cb(skb)->incoming = 1;
3245
3246        /* Time stamp */
3247        __net_timestamp(skb);
3248
3249        skb_queue_tail(&hdev->rx_q, skb);
3250        queue_work(hdev->workqueue, &hdev->rx_work);
3251
3252        return 0;
3253}
3254EXPORT_SYMBOL(hci_recv_frame);
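
    /* Driver-side usage (illustrative sketch; buf and len stand for a
     * hypothetical receive buffer, not names from this file):
     *
     *	skb = bt_skb_alloc(len, GFP_ATOMIC);
     *	if (!skb)
     *		return -ENOMEM;
     *
     *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
     *	memcpy(skb_put(skb, len), buf, len);
     *	hci_recv_frame(hdev, skb);
     *
     * Note that hci_recv_frame() consumes the skb even on failure, so
     * the caller must not free it afterwards.
     */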
3255
3256/* Receive diagnostic message from HCI drivers */
3257int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3258{
3259        /* Mark as diagnostic packet */
3260        hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3261
3262        /* Time stamp */
3263        __net_timestamp(skb);
3264
3265        skb_queue_tail(&hdev->rx_q, skb);
3266        queue_work(hdev->workqueue, &hdev->rx_work);
3267
3268        return 0;
3269}
3270EXPORT_SYMBOL(hci_recv_diag);
3271
3272void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3273{
3274        va_list vargs;
3275
3276        va_start(vargs, fmt);
3277        kfree_const(hdev->hw_info);
3278        hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3279        va_end(vargs);
3280}
3281EXPORT_SYMBOL(hci_set_hw_info);
3282
3283void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3284{
3285        va_list vargs;
3286
3287        va_start(vargs, fmt);
3288        kfree_const(hdev->fw_info);
3289        hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3290        va_end(vargs);
3291}
3292EXPORT_SYMBOL(hci_set_fw_info);
3293
3294/* ---- Interface to upper protocols ---- */
3295
3296int hci_register_cb(struct hci_cb *cb)
3297{
3298        BT_DBG("%p name %s", cb, cb->name);
3299
3300        mutex_lock(&hci_cb_list_lock);
3301        list_add_tail(&cb->list, &hci_cb_list);
3302        mutex_unlock(&hci_cb_list_lock);
3303
3304        return 0;
3305}
3306EXPORT_SYMBOL(hci_register_cb);
3307
3308int hci_unregister_cb(struct hci_cb *cb)
3309{
3310        BT_DBG("%p name %s", cb, cb->name);
3311
3312        mutex_lock(&hci_cb_list_lock);
3313        list_del(&cb->list);
3314        mutex_unlock(&hci_cb_list_lock);
3315
3316        return 0;
3317}
3318EXPORT_SYMBOL(hci_unregister_cb);
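
    /* Upper-protocol usage (illustrative sketch; "my_proto" and
     * my_connect_cfm() are hypothetical):
     *
     *	static struct hci_cb my_cb = {
     *		.name        = "my_proto",
     *		.connect_cfm = my_connect_cfm,
     *	};
     *
     * Register the callbacks once at module init with
     * hci_register_cb(&my_cb) and remove them again at module exit
     * with hci_unregister_cb(&my_cb), as L2CAP and SCO do.
     */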
3319
3320static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3321{
3322        int err;
3323
3324        BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3325               skb->len);
3326
3327        /* Time stamp */
3328        __net_timestamp(skb);
3329
3330        /* Send copy to monitor */
3331        hci_send_to_monitor(hdev, skb);
3332
3333        if (atomic_read(&hdev->promisc)) {
3334                /* Send copy to the sockets */
3335                hci_send_to_sock(hdev, skb);
3336        }
3337
3338        /* Get rid of the skb owner prior to sending to the driver. */
3339        skb_orphan(skb);
3340
3341        if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3342                kfree_skb(skb);
3343                return;
3344        }
3345
3346        err = hdev->send(hdev, skb);
3347        if (err < 0) {
3348                BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3349                kfree_skb(skb);
3350        }
3351}
3352
3353/* Send HCI command */
3354int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3355                 const void *param)
3356{
3357        struct sk_buff *skb;
3358
3359        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3360
3361        skb = hci_prepare_cmd(hdev, opcode, plen, param);
3362        if (!skb) {
3363                BT_ERR("%s no memory for command", hdev->name);
3364                return -ENOMEM;
3365        }
3366
3367        /* Stand-alone HCI commands must be flagged as
3368         * single-command requests.
3369         */
3370        bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3371
3372        skb_queue_tail(&hdev->cmd_q, skb);
3373        queue_work(hdev->workqueue, &hdev->cmd_work);
3374
3375        return 0;
3376}
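
    /* Example (illustrative, mirrors existing callers elsewhere in the
     * stack): queue a one-byte command without waiting for completion:
     *
     *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
     *
     *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
     */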
3377
3378/* Get data from the previously sent command */
3379void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3380{
3381        struct hci_command_hdr *hdr;
3382
3383        if (!hdev->sent_cmd)
3384                return NULL;
3385
3386        hdr = (void *) hdev->sent_cmd->data;
3387
3388        if (hdr->opcode != cpu_to_le16(opcode))
3389                return NULL;
3390
3391        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3392
3393        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3394}
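
    /* Event handlers use this to recover the parameters of the command
     * that a Command Complete/Status event refers to (illustrative
     * sketch):
     *
     *	struct hci_cp_remote_name_req *cp;
     *
     *	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
     *	if (!cp)
     *		return;
     */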
3395
3396/* Send HCI command and wait for command complete event */
3397struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3398                             const void *param, u32 timeout)
3399{
3400        struct sk_buff *skb;
3401
3402        if (!test_bit(HCI_UP, &hdev->flags))
3403                return ERR_PTR(-ENETDOWN);
3404
3405        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3406
3407        hci_req_sync_lock(hdev);
3408        skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3409        hci_req_sync_unlock(hdev);
3410
3411        return skb;
3412}
3413EXPORT_SYMBOL(hci_cmd_sync);
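
    /* Example (illustrative): synchronously read the local version and
     * release the returned Command Complete parameters when done:
     *
     *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
     *			   HCI_CMD_TIMEOUT);
     *	if (IS_ERR(skb))
     *		return PTR_ERR(skb);
     *
     *	... parse skb->data ...
     *	kfree_skb(skb);
     */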
3414
3415/* Send ACL data */
3416static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3417{
3418        struct hci_acl_hdr *hdr;
3419        int len = skb->len;
3420
3421        skb_push(skb, HCI_ACL_HDR_SIZE);
3422        skb_reset_transport_header(skb);
3423        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3424        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3425        hdr->dlen   = cpu_to_le16(len);
3426}
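
    /* The 16-bit handle field packs the 12-bit connection handle into
     * its low bits and the 2-bit Packet Boundary plus 2-bit Broadcast
     * flags above them; hci_handle() and hci_flags() undo this packing
     * on the receive side (see hci_acldata_packet() below).
     */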
3427
3428static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3429                          struct sk_buff *skb, __u16 flags)
3430{
3431        struct hci_conn *conn = chan->conn;
3432        struct hci_dev *hdev = conn->hdev;
3433        struct sk_buff *list;
3434
3435        skb->len = skb_headlen(skb);
3436        skb->data_len = 0;
3437
3438        hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3439
3440        switch (hdev->dev_type) {
3441        case HCI_PRIMARY:
3442                hci_add_acl_hdr(skb, conn->handle, flags);
3443                break;
3444        case HCI_AMP:
3445                hci_add_acl_hdr(skb, chan->handle, flags);
3446                break;
3447        default:
3448                BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3449                return;
3450        }
3451
3452        list = skb_shinfo(skb)->frag_list;
3453        if (!list) {
3454                /* Non-fragmented */
3455                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3456
3457                skb_queue_tail(queue, skb);
3458        } else {
3459                /* Fragmented */
3460                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3461
3462                skb_shinfo(skb)->frag_list = NULL;
3463
3464                /* Queue all fragments atomically. We need to use
3465                 * spin_lock_bh here because with 6LoWPAN links this
3466                 * function can be called from softirq context, and
3467                 * taking a plain spin lock there could deadlock.
3468                 */
3469                spin_lock_bh(&queue->lock);
3470
3471                __skb_queue_tail(queue, skb);
3472
3473                flags &= ~ACL_START;
3474                flags |= ACL_CONT;
3475                do {
3476                        skb = list; list = list->next;
3477
3478                        hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3479                        hci_add_acl_hdr(skb, conn->handle, flags);
3480
3481                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3482
3483                        __skb_queue_tail(queue, skb);
3484                } while (list);
3485
3486                spin_unlock_bh(&queue->lock);
3487        }
3488}
3489
3490void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3491{
3492        struct hci_dev *hdev = chan->conn->hdev;
3493
3494        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3495
3496        hci_queue_acl(chan, &chan->data_q, skb, flags);
3497
3498        queue_work(hdev->workqueue, &hdev->tx_work);
3499}
3500
3501/* Send SCO data */
3502void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3503{
3504        struct hci_dev *hdev = conn->hdev;
3505        struct hci_sco_hdr hdr;
3506
3507        BT_DBG("%s len %d", hdev->name, skb->len);
3508
3509        hdr.handle = cpu_to_le16(conn->handle);
3510        hdr.dlen   = skb->len;
3511
3512        skb_push(skb, HCI_SCO_HDR_SIZE);
3513        skb_reset_transport_header(skb);
3514        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3515
3516        hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3517
3518        skb_queue_tail(&conn->data_q, skb);
3519        queue_work(hdev->workqueue, &hdev->tx_work);
3520}
3521
3522/* ---- HCI TX task (outgoing data) ---- */
3523
3524/* HCI Connection scheduler */
3525static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3526                                     int *quote)
3527{
3528        struct hci_conn_hash *h = &hdev->conn_hash;
3529        struct hci_conn *conn = NULL, *c;
3530        unsigned int num = 0, min = ~0;
3531
3532        /* We don't have to lock the device here. Connections are
3533         * always added and removed with the TX task disabled. */
3534
3535        rcu_read_lock();
3536
3537        list_for_each_entry_rcu(c, &h->list, list) {
3538                if (c->type != type || skb_queue_empty(&c->data_q))
3539                        continue;
3540
3541                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3542                        continue;
3543
3544                num++;
3545
3546                if (c->sent < min) {
3547                        min  = c->sent;
3548                        conn = c;
3549                }
3550
3551                if (hci_conn_num(hdev, type) == num)
3552                        break;
3553        }
3554
3555        rcu_read_unlock();
3556
3557        if (conn) {
3558                int cnt, q;
3559
3560                switch (conn->type) {
3561                case ACL_LINK:
3562                        cnt = hdev->acl_cnt;
3563                        break;
3564                case SCO_LINK:
3565                case ESCO_LINK:
3566                        cnt = hdev->sco_cnt;
3567                        break;
3568                case LE_LINK:
3569                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3570                        break;
3571                default:
3572                        cnt = 0;
3573                        BT_ERR("Unknown link type");
3574                }
3575
3576                q = cnt / num;
3577                *quote = q ? q : 1;
3578        } else
3579                *quote = 0;
3580
3581        BT_DBG("conn %p quote %d", conn, *quote);
3582        return conn;
3583}
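
    /* Worked example of the quota above (illustrative numbers): with
     * cnt = 8 free controller buffers and num = 3 ready connections,
     * the least-busy connection gets quote = 8 / 3 = 2 packets this
     * round; since integer division can yield 0, the "q ? q : 1"
     * expression guarantees a minimum quota of one.
     */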
3584
3585static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3586{
3587        struct hci_conn_hash *h = &hdev->conn_hash;
3588        struct hci_conn *c;
3589
3590        BT_ERR("%s link tx timeout", hdev->name);
3591
3592        rcu_read_lock();
3593
3594        /* Kill stalled connections */
3595        list_for_each_entry_rcu(c, &h->list, list) {
3596                if (c->type == type && c->sent) {
3597                        BT_ERR("%s killing stalled connection %pMR",
3598                               hdev->name, &c->dst);
3599                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3600                }
3601        }
3602
3603        rcu_read_unlock();
3604}
3605
3606static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3607                                      int *quote)
3608{
3609        struct hci_conn_hash *h = &hdev->conn_hash;
3610        struct hci_chan *chan = NULL;
3611        unsigned int num = 0, min = ~0, cur_prio = 0;
3612        struct hci_conn *conn;
3613        int cnt, q, conn_num = 0;
3614
3615        BT_DBG("%s", hdev->name);
3616
3617        rcu_read_lock();
3618
3619        list_for_each_entry_rcu(conn, &h->list, list) {
3620                struct hci_chan *tmp;
3621
3622                if (conn->type != type)
3623                        continue;
3624
3625                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3626                        continue;
3627
3628                conn_num++;
3629
3630                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3631                        struct sk_buff *skb;
3632
3633                        if (skb_queue_empty(&tmp->data_q))
3634                                continue;
3635
3636                        skb = skb_peek(&tmp->data_q);
3637                        if (skb->priority < cur_prio)
3638                                continue;
3639
3640                        if (skb->priority > cur_prio) {
3641                                num = 0;
3642                                min = ~0;
3643                                cur_prio = skb->priority;
3644                        }
3645
3646                        num++;
3647
3648                        if (conn->sent < min) {
3649                                min  = conn->sent;
3650                                chan = tmp;
3651                        }
3652                }
3653
3654                if (hci_conn_num(hdev, type) == conn_num)
3655                        break;
3656        }
3657
3658        rcu_read_unlock();
3659
3660        if (!chan)
3661                return NULL;
3662
3663        switch (chan->conn->type) {
3664        case ACL_LINK:
3665                cnt = hdev->acl_cnt;
3666                break;
3667        case AMP_LINK:
3668                cnt = hdev->block_cnt;
3669                break;
3670        case SCO_LINK:
3671        case ESCO_LINK:
3672                cnt = hdev->sco_cnt;
3673                break;
3674        case LE_LINK:
3675                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3676                break;
3677        default:
3678                cnt = 0;
3679                BT_ERR("Unknown link type");
3680        }
3681
3682        q = cnt / num;
3683        *quote = q ? q : 1;
3684        BT_DBG("chan %p quote %d", chan, *quote);
3685        return chan;
3686}
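
    /* Channel selection above is two-level: the highest skb->priority
     * seen wins first, and among the channels at that priority the one
     * whose owning connection has the fewest packets in flight
     * (conn->sent) is picked, mirroring the fairness of hci_low_sent().
     */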
3687
3688static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3689{
3690        struct hci_conn_hash *h = &hdev->conn_hash;
3691        struct hci_conn *conn;
3692        int num = 0;
3693
3694        BT_DBG("%s", hdev->name);
3695
3696        rcu_read_lock();
3697
3698        list_for_each_entry_rcu(conn, &h->list, list) {
3699                struct hci_chan *chan;
3700
3701                if (conn->type != type)
3702                        continue;
3703
3704                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3705                        continue;
3706
3707                num++;
3708
3709                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3710                        struct sk_buff *skb;
3711
3712                        if (chan->sent) {
3713                                chan->sent = 0;
3714                                continue;
3715                        }
3716
3717                        if (skb_queue_empty(&chan->data_q))
3718                                continue;
3719
3720                        skb = skb_peek(&chan->data_q);
3721                        if (skb->priority >= HCI_PRIO_MAX - 1)
3722                                continue;
3723
3724                        skb->priority = HCI_PRIO_MAX - 1;
3725
3726                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3727                               skb->priority);
3728                }
3729
3730                if (hci_conn_num(hdev, type) == num)
3731                        break;
3732        }
3733
3734        rcu_read_unlock();
3735
3736}
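
    /* Starvation control: a channel that sent nothing during the last
     * round (chan->sent == 0) but still has data queued gets the head
     * of its queue promoted to HCI_PRIO_MAX - 1, so it is preferred on
     * the next scheduling pass.
     */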
3737
3738static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3739{
3740        /* Calculate count of blocks used by this packet */
3741        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3742}
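
    /* Illustrative arithmetic: with block_len = 64 and a 339-byte ACL
     * payload (skb->len minus HCI_ACL_HDR_SIZE), DIV_ROUND_UP gives
     * (339 + 63) / 64 = 6 blocks charged against hdev->block_cnt.
     */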
3743
3744static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3745{
3746        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3747                /* ACL tx timeout must be longer than maximum
3748                 * link supervision timeout (40.9 seconds) */
3749                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3750                                       HCI_ACL_TX_TIMEOUT))
3751                        hci_link_tx_to(hdev, ACL_LINK);
3752        }
3753}
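
    /* HCI_ACL_TX_TIMEOUT is 45 seconds, deliberately above the 40.9
     * second maximum link supervision timeout quoted above, so a slow
     * but healthy link is never mistaken for a stalled one.
     */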
3754
3755static void hci_sched_acl_pkt(struct hci_dev *hdev)
3756{
3757        unsigned int cnt = hdev->acl_cnt;
3758        struct hci_chan *chan;
3759        struct sk_buff *skb;
3760        int quote;
3761
3762        __check_timeout(hdev, cnt);
3763
3764        while (hdev->acl_cnt &&
3765               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3766                u32 priority = (skb_peek(&chan->data_q))->priority;
3767                while (quote-- && (skb = skb_peek(&chan->data_q))) {
3768                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3769                               skb->len, skb->priority);
3770
3771                        /* Stop if priority has changed */
3772                        if (skb->priority < priority)
3773                                break;
3774
3775                        skb = skb_dequeue(&chan->data_q);
3776
3777                        hci_conn_enter_active_mode(chan->conn,
3778                                                   bt_cb(skb)->force_active);
3779
3780                        hci_send_frame(hdev, skb);
3781                        hdev->acl_last_tx = jiffies;
3782
3783                        hdev->acl_cnt--;
3784                        chan->sent++;
3785                        chan->conn->sent++;
3786                }
3787        }
3788
3789        if (cnt != hdev->acl_cnt)
3790                hci_prio_recalculate(hdev, ACL_LINK);
3791}
3792
3793static void hci_sched_acl_blk(struct hci_dev *hdev)
3794{
3795        unsigned int cnt = hdev->block_cnt;
3796        struct hci_chan *chan;
3797        struct sk_buff *skb;
3798        int quote;
3799        u8 type;
3800
3801        __check_timeout(hdev, cnt);
3802
3803        BT_DBG("%s", hdev->name);
3804
3805        if (hdev->dev_type == HCI_AMP)
3806                type = AMP_LINK;
3807        else
3808                type = ACL_LINK;
3809
3810        while (hdev->block_cnt > 0 &&
3811               (chan = hci_chan_sent(hdev, type, &quote))) {
3812                u32 priority = (skb_peek(&chan->data_q))->priority;
3813                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3814                        int blocks;
3815
3816                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3817                               skb->len, skb->priority);
3818
3819                        /* Stop if priority has changed */
3820                        if (skb->priority < priority)
3821                                break;
3822
3823                        skb = skb_dequeue(&chan->data_q);
3824
3825                        blocks = __get_blocks(hdev, skb);
3826                        if (blocks > hdev->block_cnt)
3827                                return;
3828
3829                        hci_conn_enter_active_mode(chan->conn,
3830                                                   bt_cb(skb)->force_active);
3831
3832                        hci_send_frame(hdev, skb);
3833                        hdev->acl_last_tx = jiffies;
3834
3835                        hdev->block_cnt -= blocks;
3836                        quote -= blocks;
3837
3838                        chan->sent += blocks;
3839                        chan->conn->sent += blocks;
3840                }
3841        }
3842
3843        if (cnt != hdev->block_cnt)
3844                hci_prio_recalculate(hdev, type);
3845}
3846
3847static void hci_sched_acl(struct hci_dev *hdev)
3848{
3849        BT_DBG("%s", hdev->name);
3850
3851        /* No ACL link over BR/EDR controller */
3852        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3853                return;
3854
3855        /* No AMP link over AMP controller */
3856        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3857                return;
3858
3859        switch (hdev->flow_ctl_mode) {
3860        case HCI_FLOW_CTL_MODE_PACKET_BASED:
3861                hci_sched_acl_pkt(hdev);
3862                break;
3863
3864        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3865                hci_sched_acl_blk(hdev);
3866                break;
3867        }
3868}
3869
3870/* Schedule SCO */
3871static void hci_sched_sco(struct hci_dev *hdev)
3872{
3873        struct hci_conn *conn;
3874        struct sk_buff *skb;
3875        int quote;
3876
3877        BT_DBG("%s", hdev->name);
3878
3879        if (!hci_conn_num(hdev, SCO_LINK))
3880                return;
3881
3882        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3883                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3884                        BT_DBG("skb %p len %d", skb, skb->len);
3885                        hci_send_frame(hdev, skb);
3886
3887                        conn->sent++;
3888                        if (conn->sent == ~0)
3889                                conn->sent = 0;
3890                }
3891        }
3892}
3893
3894static void hci_sched_esco(struct hci_dev *hdev)
3895{
3896        struct hci_conn *conn;
3897        struct sk_buff *skb;
3898        int quote;
3899
3900        BT_DBG("%s", hdev->name);
3901
3902        if (!hci_conn_num(hdev, ESCO_LINK))
3903                return;
3904
3905        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3906                                                     &quote))) {
3907                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3908                        BT_DBG("skb %p len %d", skb, skb->len);
3909                        hci_send_frame(hdev, skb);
3910
3911                        conn->sent++;
3912                        if (conn->sent == ~0)
3913                                conn->sent = 0;
3914                }
3915        }
3916}
3917
3918static void hci_sched_le(struct hci_dev *hdev)
3919{
3920        struct hci_chan *chan;
3921        struct sk_buff *skb;
3922        int quote, cnt, tmp;
3923
3924        BT_DBG("%s", hdev->name);
3925
3926        if (!hci_conn_num(hdev, LE_LINK))
3927                return;
3928
3929        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3930                /* LE tx timeout must be longer than maximum
3931                 * link supervision timeout (40.9 seconds) */
3932                if (!hdev->le_cnt && hdev->le_pkts &&
3933                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
3934                        hci_link_tx_to(hdev, LE_LINK);
3935        }
3936
3937        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3938        tmp = cnt;
3939        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3940                u32 priority = (skb_peek(&chan->data_q))->priority;
3941                while (quote-- && (skb = skb_peek(&chan->data_q))) {
3942                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3943                               skb->len, skb->priority);
3944
3945                        /* Stop if priority has changed */
3946                        if (skb->priority < priority)
3947                                break;
3948
3949                        skb = skb_dequeue(&chan->data_q);
3950
3951                        hci_send_frame(hdev, skb);
3952                        hdev->le_last_tx = jiffies;
3953
3954                        cnt--;
3955                        chan->sent++;
3956                        chan->conn->sent++;
3957                }
3958        }
3959
3960        if (hdev->le_pkts)
3961                hdev->le_cnt = cnt;
3962        else
3963                hdev->acl_cnt = cnt;
3964
3965        if (cnt != tmp)
3966                hci_prio_recalculate(hdev, LE_LINK);
3967}
3968
3969static void hci_tx_work(struct work_struct *work)
3970{
3971        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3972        struct sk_buff *skb;
3973
3974        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3975               hdev->sco_cnt, hdev->le_cnt);
3976
3977        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3978                /* Schedule queues and send stuff to HCI driver */
3979                hci_sched_acl(hdev);
3980                hci_sched_sco(hdev);
3981                hci_sched_esco(hdev);
3982                hci_sched_le(hdev);
3983        }
3984
3985        /* Send next queued raw (unknown type) packet */
3986        while ((skb = skb_dequeue(&hdev->raw_q)))
3987                hci_send_frame(hdev, skb);
3988}
3989
3990/* ----- HCI RX task (incoming data processing) ----- */
3991
3992/* ACL data packet */
3993static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3994{
3995        struct hci_acl_hdr *hdr = (void *) skb->data;
3996        struct hci_conn *conn;
3997        __u16 handle, flags;
3998
3999        skb_pull(skb, HCI_ACL_HDR_SIZE);
4000
4001        handle = __le16_to_cpu(hdr->handle);
4002        flags  = hci_flags(handle);
4003        handle = hci_handle(handle);
4004
4005        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4006               handle, flags);
4007
4008        hdev->stat.acl_rx++;
4009
4010        hci_dev_lock(hdev);
4011        conn = hci_conn_hash_lookup_handle(hdev, handle);
4012        hci_dev_unlock(hdev);
4013
4014        if (conn) {
4015                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4016
4017                /* Send to upper protocol */
4018                l2cap_recv_acldata(conn, skb, flags);
4019                return;
4020        }
4021
4022        BT_ERR("%s ACL packet for unknown connection handle %d",
4023               hdev->name, handle);
4024
4025        kfree_skb(skb);
4026}
4027
4028/* SCO data packet */
4029static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4030{
4031        struct hci_sco_hdr *hdr = (void *) skb->data;
4032        struct hci_conn *conn;
4033        __u16 handle;
4034
4035        skb_pull(skb, HCI_SCO_HDR_SIZE);
4036
4037        handle = __le16_to_cpu(hdr->handle);
4038
4039        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4040
4041        hdev->stat.sco_rx++;
4042
4043        hci_dev_lock(hdev);
4044        conn = hci_conn_hash_lookup_handle(hdev, handle);
4045        hci_dev_unlock(hdev);
4046
4047        if (conn) {
4048                /* Send to upper protocol */
4049                sco_recv_scodata(conn, skb);
4050                return;
4051        }
4052
4053        BT_ERR("%s SCO packet for unknown connection handle %d",
4054               hdev->name, handle);
4055
4056        kfree_skb(skb);
4057}
4058
4059static bool hci_req_is_complete(struct hci_dev *hdev)
4060{
4061        struct sk_buff *skb;
4062
4063        skb = skb_peek(&hdev->cmd_q);
4064        if (!skb)
4065                return true;
4066
4067        return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4068}
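
    /* A request is a run of queued commands in which only the first
     * one carries HCI_REQ_START; if the skb at the head of cmd_q starts
     * a new request (or the queue is empty), the previous request must
     * have completed.
     */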
4069
4070static void hci_resend_last(struct hci_dev *hdev)
4071{
4072        struct hci_command_hdr *sent;
4073        struct sk_buff *skb;
4074        u16 opcode;
4075
4076        if (!hdev->sent_cmd)
4077                return;
4078
4079        sent = (void *) hdev->sent_cmd->data;
4080        opcode = __le16_to_cpu(sent->opcode);
4081        if (opcode == HCI_OP_RESET)
4082                return;
4083
4084        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4085        if (!skb)
4086                return;
4087
4088        skb_queue_head(&hdev->cmd_q, skb);
4089        queue_work(hdev->workqueue, &hdev->cmd_work);
4090}
4091
4092void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4093                          hci_req_complete_t *req_complete,
4094                          hci_req_complete_skb_t *req_complete_skb)
4095{
4096        struct sk_buff *skb;
4097        unsigned long flags;
4098
4099        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4100
4101        /* If the completed command doesn't match the last one that was
4102         * sent we need to do special handling of it.
4103         */
4104        if (!hci_sent_cmd_data(hdev, opcode)) {
4105                /* Some CSR-based controllers generate a spontaneous
4106                 * reset complete event during init and any pending
4107                 * command will never be completed. In such a case we
4108                 * need to resend whatever was the last sent
4109                 * command.
4110                 */
4111                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4112                        hci_resend_last(hdev);
4113
4114                return;
4115        }
4116
4117        /* If the command succeeded and there's still more commands in
4118         * this request the request is not yet complete.
4119         */
4120        if (!status && !hci_req_is_complete(hdev))
4121                return;
4122
4123        /* If this was the last command in a request the complete
4124         * callback would be found in hdev->sent_cmd instead of the
4125         * command queue (hdev->cmd_q).
4126         */
4127        if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4128                *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4129                return;
4130        }
4131
4132        if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4133                *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4134                return;
4135        }
4136
4137        /* Remove all pending commands belonging to this request */
4138        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4139        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4140                if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4141                        __skb_queue_head(&hdev->cmd_q, skb);
4142                        break;
4143                }
4144
4145                if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4146                        *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4147                else
4148                        *req_complete = bt_cb(skb)->hci.req_complete;
4149                kfree_skb(skb);
4150        }
4151        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4152}
4153
4154static void hci_rx_work(struct work_struct *work)
4155{
4156        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4157        struct sk_buff *skb;
4158
4159        BT_DBG("%s", hdev->name);
4160
4161        while ((skb = skb_dequeue(&hdev->rx_q))) {
4162                /* Send copy to monitor */
4163                hci_send_to_monitor(hdev, skb);
4164
4165                if (atomic_read(&hdev->promisc)) {
4166                        /* Send copy to the sockets */
4167                        hci_send_to_sock(hdev, skb);
4168                }
4169
4170                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4171                        kfree_skb(skb);
4172                        continue;
4173                }
4174
4175                if (test_bit(HCI_INIT, &hdev->flags)) {
4176                        /* Don't process data packets in this state. */
4177                        switch (hci_skb_pkt_type(skb)) {
4178                        case HCI_ACLDATA_PKT:
4179                        case HCI_SCODATA_PKT:
4180                                kfree_skb(skb);
4181                                continue;
4182                        }
4183                }
4184
4185                /* Process frame */
4186                switch (hci_skb_pkt_type(skb)) {
4187                case HCI_EVENT_PKT:
4188                        BT_DBG("%s Event packet", hdev->name);
4189                        hci_event_packet(hdev, skb);
4190                        break;
4191
4192                case HCI_ACLDATA_PKT:
4193                        BT_DBG("%s ACL data packet", hdev->name);
4194                        hci_acldata_packet(hdev, skb);
4195                        break;
4196
4197                case HCI_SCODATA_PKT:
4198                        BT_DBG("%s SCO data packet", hdev->name);
4199                        hci_scodata_packet(hdev, skb);
4200                        break;
4201
4202                default:
4203                        kfree_skb(skb);
4204                        break;
4205                }
4206        }
4207}
4208
4209static void hci_cmd_work(struct work_struct *work)
4210{
4211        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4212        struct sk_buff *skb;
4213
4214        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4215               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4216
4217        /* Send queued commands */
4218        if (atomic_read(&hdev->cmd_cnt)) {
4219                skb = skb_dequeue(&hdev->cmd_q);
4220                if (!skb)
4221                        return;
4222
4223                kfree_skb(hdev->sent_cmd);
4224
4225                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4226                if (hdev->sent_cmd) {
4227                        atomic_dec(&hdev->cmd_cnt);
4228                        hci_send_frame(hdev, skb);
4229                        if (test_bit(HCI_RESET, &hdev->flags))
4230                                cancel_delayed_work(&hdev->cmd_timer);
4231                        else
4232                                schedule_delayed_work(&hdev->cmd_timer,
4233                                                      HCI_CMD_TIMEOUT);
4234                } else {
4235                        skb_queue_head(&hdev->cmd_q, skb);
4236                        queue_work(hdev->workqueue, &hdev->cmd_work);
4237                }
4238        }
4239}
4240