linux/net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
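/* Note: indexes handed out by this IDA become the numeric suffix of the
 * hciX device name when a controller is registered, which guarantees
 * each registered controller a unique index.
 */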

/* ---- HCI debugfs entries ---- */

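/* The dut_mode entry exposes Device Under Test mode as a debugfs bool.
 * Enabling it sends HCI_OP_ENABLE_DUT_MODE; because a controller can only
 * leave test mode through a reset, disabling sends HCI_OP_RESET instead
 * (see dut_mode_write() below).
 */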
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

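/* Create the debugfs entries that are valid for every controller; this
 * runs while the controller is still in its setup (or unconfigured setup)
 * phase. The vendor_diag entry is only created when the driver provides
 * a set_diag callback.
 */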
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

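/* Controller bring-up is split into numbered request stages, driven from
 * __hci_init() further below: stage 1 resets the controller and reads
 * basic device information, stage 2 does BR/EDR and LE specific setup,
 * and stages 3 and 4 program event masks and optional features. AMP
 * controllers stop after stage 2.
 */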
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

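/* The event mask written below is the 8 octet bitmap defined for the HCI
 * Set Event Mask command; events[n] is octet n of that bitmap. Individual
 * bits are only set when the matching feature or command is supported, so
 * the controller is never asked to report events it cannot generate.
 */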
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

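/* hdev->commands[] holds the bitmap returned by Read Local Supported
 * Commands: octet and bit positions follow the supported commands table
 * in the Core specification, which is why the checks below test
 * expressions such as commands[26] & 0x08.
 */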
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters
                 * Request Link Layer Procedure, enable the corresponding
                 * event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the Channel Selection
                 * Algorithm #2 feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;        /* LE PHY Update Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands, and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = hdev->le_max_tx_len;
                cp.tx_time = hdev->le_max_tx_time;
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                /* No transmitter PHY or receiver PHY preferences */
                cp.all_phys = 0x03;
                cp.tx_phys = 0;
                cp.rx_phys = 0;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE type controllers. AMP controllers only
         * need the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

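/* The small request builders below each queue a single HCI command and
 * are meant to be run synchronously via hci_req_sync() from the ioctl
 * helpers.
 */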
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return and must be released with hci_dev_put().
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

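/* Re-insert @ie so the resolve list stays ordered by signal strength:
 * entries with a smaller absolute RSSI (a stronger signal) come first,
 * which lets name resolution start with the closest devices.
 */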
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

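/* Copy at most @num cached entries into @buf as struct inquiry_info
 * records; the caller is expected to hold hdev->lock while the cache is
 * walked (as hci_inquiry() below does).
 */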
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

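/* Handler for the HCIINQUIRY ioctl: flush the cache when requested or
 * stale, run a fresh inquiry if needed, and copy the cached results back
 * to user space.
 */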
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer of
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() runs under the device lock and can't sleep,
         * so dump into a temporary buffer first and then copy it out to
         * user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

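/* Power on a controller: call the driver's open(), run the vendor setup
 * and the staged init sequence, and on success signal HCI_DEV_UP. On any
 * failure the transport is torn down again before returning.
 */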
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_PRIMARY) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

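/* hci_dev_open() is the entry point for the HCIDEVUP ioctl; it performs
 * the policy checks that only apply to the legacy ioctl interface before
 * handing off to hci_dev_do_open().
 */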
1487int hci_dev_open(__u16 dev)
1488{
1489        struct hci_dev *hdev;
1490        int err;
1491
1492        hdev = hci_dev_get(dev);
1493        if (!hdev)
1494                return -ENODEV;
1495
1496        /* Devices that are marked as unconfigured can only be powered
1497         * up as user channel. Trying to bring them up as normal devices
1498         * will result in a failure. Only user channel operation is
1499         * possible.
1500         *
1501         * When this function is called for a user channel, the flag
1502         * HCI_USER_CHANNEL will be set first before attempting to
1503         * open the device.
1504         */
1505        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1506            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1507                err = -EOPNOTSUPP;
1508                goto done;
1509        }
1510
1511        /* We need to ensure that no other power on/off work is pending
1512         * before proceeding to call hci_dev_do_open. This is
1513         * particularly important if the setup procedure has not yet
1514         * completed.
1515         */
1516        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1517                cancel_delayed_work(&hdev->power_off);
1518
1519        /* After this call it is guaranteed that the setup procedure
1520         * has finished. This means that error conditions like RFKILL
1521         * or the lack of a valid public or static random address now apply.
1522         */
1523        flush_workqueue(hdev->req_workqueue);
1524
1525        /* For controllers that do not use the management interface
1526         * and are brought up via the legacy ioctl, set the HCI_BONDABLE bit
1527         * so that pairing works for them. Once the management interface
1528         * is in use this bit will be cleared again and userspace has
1529         * to explicitly enable it.
1530         */
1531        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1532            !hci_dev_test_flag(hdev, HCI_MGMT))
1533                hci_dev_set_flag(hdev, HCI_BONDABLE);
1534
1535        err = hci_dev_do_open(hdev);
1536
1537done:
1538        hci_dev_put(hdev);
1539        return err;
1540}
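
/* Illustrative sketch (simplified, not part of this file): the legacy
 * HCIDEVUP ioctl in net/bluetooth/hci_sock.c is the usual entry point
 * into hci_dev_open(), roughly:
 *
 *        case HCIDEVUP:
 *                if (!capable(CAP_NET_ADMIN))
 *                        return -EPERM;
 *                return hci_dev_open(arg);
 */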
1541
1542/* This function requires the caller holds hdev->lock */
1543static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1544{
1545        struct hci_conn_params *p;
1546
1547        list_for_each_entry(p, &hdev->le_conn_params, list) {
1548                if (p->conn) {
1549                        hci_conn_drop(p->conn);
1550                        hci_conn_put(p->conn);
1551                        p->conn = NULL;
1552                }
1553                list_del_init(&p->action);
1554        }
1555
1556        BT_DBG("All LE pending actions cleared");
1557}
1558
1559int hci_dev_do_close(struct hci_dev *hdev)
1560{
1561        bool auto_off;
1562
1563        BT_DBG("%s %p", hdev->name, hdev);
1564
1565        if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1566            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1567            test_bit(HCI_UP, &hdev->flags)) {
1568                /* Execute vendor specific shutdown routine */
1569                if (hdev->shutdown)
1570                        hdev->shutdown(hdev);
1571        }
1572
1573        cancel_delayed_work(&hdev->power_off);
1574
1575        hci_request_cancel_all(hdev);
1576        hci_req_sync_lock(hdev);
1577
1578        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1579                cancel_delayed_work_sync(&hdev->cmd_timer);
1580                hci_req_sync_unlock(hdev);
1581                return 0;
1582        }
1583
1584        hci_leds_update_powered(hdev, false);
1585
1586        /* Flush RX and TX works */
1587        flush_work(&hdev->tx_work);
1588        flush_work(&hdev->rx_work);
1589
1590        if (hdev->discov_timeout > 0) {
1591                hdev->discov_timeout = 0;
1592                hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1593                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1594        }
1595
1596        if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1597                cancel_delayed_work(&hdev->service_cache);
1598
1599        if (hci_dev_test_flag(hdev, HCI_MGMT))
1600                cancel_delayed_work_sync(&hdev->rpa_expired);
1601
1602        /* Avoid potential lockdep warnings from the *_flush() calls by
1603         * ensuring the workqueue is empty up front.
1604         */
1605        drain_workqueue(hdev->workqueue);
1606
1607        hci_dev_lock(hdev);
1608
1609        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1610
1611        auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1612
1613        if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1614            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1615            hci_dev_test_flag(hdev, HCI_MGMT))
1616                __mgmt_power_off(hdev);
1617
1618        hci_inquiry_cache_flush(hdev);
1619        hci_pend_le_actions_clear(hdev);
1620        hci_conn_hash_flush(hdev);
1621        hci_dev_unlock(hdev);
1622
1623        smp_unregister(hdev);
1624
1625        hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1626
1627        if (hdev->flush)
1628                hdev->flush(hdev);
1629
1630        /* Reset device */
1631        skb_queue_purge(&hdev->cmd_q);
1632        atomic_set(&hdev->cmd_cnt, 1);
1633        if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1634            !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1635                set_bit(HCI_INIT, &hdev->flags);
1636                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1637                clear_bit(HCI_INIT, &hdev->flags);
1638        }
1639
1640        /* Flush the command work */
1641        flush_work(&hdev->cmd_work);
1642
1643        /* Drop queues */
1644        skb_queue_purge(&hdev->rx_q);
1645        skb_queue_purge(&hdev->cmd_q);
1646        skb_queue_purge(&hdev->raw_q);
1647
1648        /* Drop last sent command */
1649        if (hdev->sent_cmd) {
1650                cancel_delayed_work_sync(&hdev->cmd_timer);
1651                kfree_skb(hdev->sent_cmd);
1652                hdev->sent_cmd = NULL;
1653        }
1654
1655        clear_bit(HCI_RUNNING, &hdev->flags);
1656        hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1657
1658        /* After this point our queues are empty
1659         * and no tasks are scheduled. */
1660        hdev->close(hdev);
1661
1662        /* Clear flags */
1663        hdev->flags &= BIT(HCI_RAW);
1664        hci_dev_clear_volatile_flags(hdev);
1665
1666        /* Controller radio is available but is currently powered down */
1667        hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1668
1669        memset(hdev->eir, 0, sizeof(hdev->eir));
1670        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1671        bacpy(&hdev->random_addr, BDADDR_ANY);
1672
1673        hci_req_sync_unlock(hdev);
1674
1675        hci_dev_put(hdev);
1676        return 0;
1677}
1678
1679int hci_dev_close(__u16 dev)
1680{
1681        struct hci_dev *hdev;
1682        int err;
1683
1684        hdev = hci_dev_get(dev);
1685        if (!hdev)
1686                return -ENODEV;
1687
1688        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1689                err = -EBUSY;
1690                goto done;
1691        }
1692
1693        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1694                cancel_delayed_work(&hdev->power_off);
1695
1696        err = hci_dev_do_close(hdev);
1697
1698done:
1699        hci_dev_put(hdev);
1700        return err;
1701}
1702
1703static int hci_dev_do_reset(struct hci_dev *hdev)
1704{
1705        int ret;
1706
1707        BT_DBG("%s %p", hdev->name, hdev);
1708
1709        hci_req_sync_lock(hdev);
1710
1711        /* Drop queues */
1712        skb_queue_purge(&hdev->rx_q);
1713        skb_queue_purge(&hdev->cmd_q);
1714
1715        /* Avoid potential lockdep warnings from the *_flush() calls by
1716         * ensuring the workqueue is empty up front.
1717         */
1718        drain_workqueue(hdev->workqueue);
1719
1720        hci_dev_lock(hdev);
1721        hci_inquiry_cache_flush(hdev);
1722        hci_conn_hash_flush(hdev);
1723        hci_dev_unlock(hdev);
1724
1725        if (hdev->flush)
1726                hdev->flush(hdev);
1727
1728        atomic_set(&hdev->cmd_cnt, 1);
1729        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1730
1731        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1732
1733        hci_req_sync_unlock(hdev);
1734        return ret;
1735}
1736
1737int hci_dev_reset(__u16 dev)
1738{
1739        struct hci_dev *hdev;
1740        int err;
1741
1742        hdev = hci_dev_get(dev);
1743        if (!hdev)
1744                return -ENODEV;
1745
1746        if (!test_bit(HCI_UP, &hdev->flags)) {
1747                err = -ENETDOWN;
1748                goto done;
1749        }
1750
1751        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1752                err = -EBUSY;
1753                goto done;
1754        }
1755
1756        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1757                err = -EOPNOTSUPP;
1758                goto done;
1759        }
1760
1761        err = hci_dev_do_reset(hdev);
1762
1763done:
1764        hci_dev_put(hdev);
1765        return err;
1766}
1767
1768int hci_dev_reset_stat(__u16 dev)
1769{
1770        struct hci_dev *hdev;
1771        int ret = 0;
1772
1773        hdev = hci_dev_get(dev);
1774        if (!hdev)
1775                return -ENODEV;
1776
1777        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1778                ret = -EBUSY;
1779                goto done;
1780        }
1781
1782        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1783                ret = -EOPNOTSUPP;
1784                goto done;
1785        }
1786
1787        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1788
1789done:
1790        hci_dev_put(hdev);
1791        return ret;
1792}
1793
1794static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1795{
1796        bool conn_changed, discov_changed;
1797
1798        BT_DBG("%s scan 0x%02x", hdev->name, scan);
1799
1800        if ((scan & SCAN_PAGE))
1801                conn_changed = !hci_dev_test_and_set_flag(hdev,
1802                                                          HCI_CONNECTABLE);
1803        else
1804                conn_changed = hci_dev_test_and_clear_flag(hdev,
1805                                                           HCI_CONNECTABLE);
1806
1807        if ((scan & SCAN_INQUIRY)) {
1808                discov_changed = !hci_dev_test_and_set_flag(hdev,
1809                                                            HCI_DISCOVERABLE);
1810        } else {
1811                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1812                discov_changed = hci_dev_test_and_clear_flag(hdev,
1813                                                             HCI_DISCOVERABLE);
1814        }
1815
1816        if (!hci_dev_test_flag(hdev, HCI_MGMT))
1817                return;
1818
1819        if (conn_changed || discov_changed) {
1820                /* In case this was disabled through mgmt */
1821                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1822
1823                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1824                        hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1825
1826                mgmt_new_settings(hdev);
1827        }
1828}
1829
1830int hci_dev_cmd(unsigned int cmd, void __user *arg)
1831{
1832        struct hci_dev *hdev;
1833        struct hci_dev_req dr;
1834        int err = 0;
1835
1836        if (copy_from_user(&dr, arg, sizeof(dr)))
1837                return -EFAULT;
1838
1839        hdev = hci_dev_get(dr.dev_id);
1840        if (!hdev)
1841                return -ENODEV;
1842
1843        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1844                err = -EBUSY;
1845                goto done;
1846        }
1847
1848        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1849                err = -EOPNOTSUPP;
1850                goto done;
1851        }
1852
1853        if (hdev->dev_type != HCI_PRIMARY) {
1854                err = -EOPNOTSUPP;
1855                goto done;
1856        }
1857
1858        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1859                err = -EOPNOTSUPP;
1860                goto done;
1861        }
1862
1863        switch (cmd) {
1864        case HCISETAUTH:
1865                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1866                                   HCI_INIT_TIMEOUT, NULL);
1867                break;
1868
1869        case HCISETENCRYPT:
1870                if (!lmp_encrypt_capable(hdev)) {
1871                        err = -EOPNOTSUPP;
1872                        break;
1873                }
1874
1875                if (!test_bit(HCI_AUTH, &hdev->flags)) {
1876                        /* Auth must be enabled first */
1877                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1878                                           HCI_INIT_TIMEOUT, NULL);
1879                        if (err)
1880                                break;
1881                }
1882
1883                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1884                                   HCI_INIT_TIMEOUT, NULL);
1885                break;
1886
1887        case HCISETSCAN:
1888                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1889                                   HCI_INIT_TIMEOUT, NULL);
1890
1891                /* Ensure that the connectable and discoverable states
1892                 * get correctly modified as this was a non-mgmt change.
1893                 */
1894                if (!err)
1895                        hci_update_scan_state(hdev, dr.dev_opt);
1896                break;
1897
1898        case HCISETLINKPOL:
1899                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1900                                   HCI_INIT_TIMEOUT, NULL);
1901                break;
1902
1903        case HCISETLINKMODE:
1904                hdev->link_mode = ((__u16) dr.dev_opt) &
1905                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
1906                break;
1907
1908        case HCISETPTYPE:
1909                hdev->pkt_type = (__u16) dr.dev_opt;
1910                break;
1911
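        /* For the two MTU commands below, dev_opt packs two 16-bit
         * values: the word at offset 0 is the packet count and the
         * word at offset 2 is the MTU (layout inferred from the
         * pointer arithmetic that follows).
         */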
1912        case HCISETACLMTU:
1913                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1914                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1915                break;
1916
1917        case HCISETSCOMTU:
1918                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1919                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1920                break;
1921
1922        default:
1923                err = -EINVAL;
1924                break;
1925        }
1926
1927done:
1928        hci_dev_put(hdev);
1929        return err;
1930}
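
/* Illustrative sketch (userspace side, not part of this file): legacy
 * tools such as hciconfig drive these commands over a raw HCI socket.
 * A hypothetical "piscan" equivalent, assuming <bluetooth/hci.h>:
 *
 *        struct hci_dev_req dr;
 *        int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *        dr.dev_id  = 0;
 *        dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *        if (ioctl(sk, HCISETSCAN, (unsigned long) &dr) < 0)
 *                perror("HCISETSCAN");
 */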
1931
1932int hci_get_dev_list(void __user *arg)
1933{
1934        struct hci_dev *hdev;
1935        struct hci_dev_list_req *dl;
1936        struct hci_dev_req *dr;
1937        int n = 0, size, err;
1938        __u16 dev_num;
1939
1940        if (get_user(dev_num, (__u16 __user *) arg))
1941                return -EFAULT;
1942
1943        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1944                return -EINVAL;
1945
1946        size = sizeof(*dl) + dev_num * sizeof(*dr);
1947
1948        dl = kzalloc(size, GFP_KERNEL);
1949        if (!dl)
1950                return -ENOMEM;
1951
1952        dr = dl->dev_req;
1953
1954        read_lock(&hci_dev_list_lock);
1955        list_for_each_entry(hdev, &hci_dev_list, list) {
1956                unsigned long flags = hdev->flags;
1957
1958                /* When auto-off is pending it means the transport is
1959                 * running, but in that case still report the device as
1960                 * down, so userspace treats it as powered off.
1961                 */
1962                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1963                        flags &= ~BIT(HCI_UP);
1964
1965                (dr + n)->dev_id  = hdev->id;
1966                (dr + n)->dev_opt = flags;
1967
1968                if (++n >= dev_num)
1969                        break;
1970        }
1971        read_unlock(&hci_dev_list_lock);
1972
1973        dl->dev_num = n;
1974        size = sizeof(*dl) + n * sizeof(*dr);
1975
1976        err = copy_to_user(arg, dl, size);
1977        kfree(dl);
1978
1979        return err ? -EFAULT : 0;
1980}
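
/* Illustrative sketch (userspace side, not part of this file): the
 * caller passes the array capacity in dev_num and gets back the number
 * of entries actually filled in:
 *
 *        struct hci_dev_list_req *dl;
 *        int i;
 *
 *        dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *        dl->dev_num = HCI_MAX_DEV;
 *        if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *                for (i = 0; i < dl->dev_num; i++)
 *                        printf("hci%u flags 0x%lx\n",
 *                               dl->dev_req[i].dev_id,
 *                               (unsigned long) dl->dev_req[i].dev_opt);
 */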
1981
1982int hci_get_dev_info(void __user *arg)
1983{
1984        struct hci_dev *hdev;
1985        struct hci_dev_info di;
1986        unsigned long flags;
1987        int err = 0;
1988
1989        if (copy_from_user(&di, arg, sizeof(di)))
1990                return -EFAULT;
1991
1992        hdev = hci_dev_get(di.dev_id);
1993        if (!hdev)
1994                return -ENODEV;
1995
1996        /* When auto-off is pending it means the transport is
1997         * running, but in that case still report the device as
1998         * down, so userspace treats it as powered off.
1999         */
2000        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2001                flags = hdev->flags & ~BIT(HCI_UP);
2002        else
2003                flags = hdev->flags;
2004
2005        strcpy(di.name, hdev->name);
2006        di.bdaddr   = hdev->bdaddr;
2007        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2008        di.flags    = flags;
2009        di.pkt_type = hdev->pkt_type;
2010        if (lmp_bredr_capable(hdev)) {
2011                di.acl_mtu  = hdev->acl_mtu;
2012                di.acl_pkts = hdev->acl_pkts;
2013                di.sco_mtu  = hdev->sco_mtu;
2014                di.sco_pkts = hdev->sco_pkts;
2015        } else {
2016                di.acl_mtu  = hdev->le_mtu;
2017                di.acl_pkts = hdev->le_pkts;
2018                di.sco_mtu  = 0;
2019                di.sco_pkts = 0;
2020        }
2021        di.link_policy = hdev->link_policy;
2022        di.link_mode   = hdev->link_mode;
2023
2024        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2025        memcpy(&di.features, &hdev->features, sizeof(di.features));
2026
2027        if (copy_to_user(arg, &di, sizeof(di)))
2028                err = -EFAULT;
2029
2030        hci_dev_put(hdev);
2031
2032        return err;
2033}
2034
2035/* ---- Interface to HCI drivers ---- */
2036
2037static int hci_rfkill_set_block(void *data, bool blocked)
2038{
2039        struct hci_dev *hdev = data;
2040
2041        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2042
2043        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2044                return -EBUSY;
2045
2046        if (blocked) {
2047                hci_dev_set_flag(hdev, HCI_RFKILLED);
2048                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2049                    !hci_dev_test_flag(hdev, HCI_CONFIG))
2050                        hci_dev_do_close(hdev);
2051        } else {
2052                hci_dev_clear_flag(hdev, HCI_RFKILLED);
2053        }
2054
2055        return 0;
2056}
2057
2058static const struct rfkill_ops hci_rfkill_ops = {
2059        .set_block = hci_rfkill_set_block,
2060};
2061
2062static void hci_power_on(struct work_struct *work)
2063{
2064        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2065        int err;
2066
2067        BT_DBG("%s", hdev->name);
2068
2069        if (test_bit(HCI_UP, &hdev->flags) &&
2070            hci_dev_test_flag(hdev, HCI_MGMT) &&
2071            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2072                cancel_delayed_work(&hdev->power_off);
2073                hci_req_sync_lock(hdev);
2074                err = __hci_req_hci_power_on(hdev);
2075                hci_req_sync_unlock(hdev);
2076                mgmt_power_on(hdev, err);
2077                return;
2078        }
2079
2080        err = hci_dev_do_open(hdev);
2081        if (err < 0) {
2082                hci_dev_lock(hdev);
2083                mgmt_set_powered_failed(hdev, err);
2084                hci_dev_unlock(hdev);
2085                return;
2086        }
2087
2088        /* During the HCI setup phase, a few error conditions are
2089         * ignored and they need to be checked now. If they are still
2090         * valid, it is important to turn the device back off.
2091         */
2092        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2093            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2094            (hdev->dev_type == HCI_PRIMARY &&
2095             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2096             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2097                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2098                hci_dev_do_close(hdev);
2099        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2100                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2101                                   HCI_AUTO_OFF_TIMEOUT);
2102        }
2103
2104        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2105                /* For unconfigured devices, set the HCI_RAW flag
2106                 * so that userspace can easily identify them.
2107                 */
2108                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2109                        set_bit(HCI_RAW, &hdev->flags);
2110
2111                /* For fully configured devices, this will send
2112                 * the Index Added event. For unconfigured devices,
2113                 * it will send the Unconfigured Index Added event.
2114                 *
2115                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2116                 * and no event will be sent.
2117                 */
2118                mgmt_index_added(hdev);
2119        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2120                /* Once the controller is configured, it is
2121                 * important to clear the HCI_RAW flag.
2122                 */
2123                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2124                        clear_bit(HCI_RAW, &hdev->flags);
2125
2126                /* Powering on the controller with HCI_CONFIG set only
2127                 * happens with the transition from unconfigured to
2128                 * configured. This will send the Index Added event.
2129                 */
2130                mgmt_index_added(hdev);
2131        }
2132}
2133
2134static void hci_power_off(struct work_struct *work)
2135{
2136        struct hci_dev *hdev = container_of(work, struct hci_dev,
2137                                            power_off.work);
2138
2139        BT_DBG("%s", hdev->name);
2140
2141        hci_dev_do_close(hdev);
2142}
2143
2144static void hci_error_reset(struct work_struct *work)
2145{
2146        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2147
2148        BT_DBG("%s", hdev->name);
2149
2150        if (hdev->hw_error)
2151                hdev->hw_error(hdev, hdev->hw_error_code);
2152        else
2153                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2154
2155        if (hci_dev_do_close(hdev))
2156                return;
2157
2158        hci_dev_do_open(hdev);
2159}
2160
2161void hci_uuids_clear(struct hci_dev *hdev)
2162{
2163        struct bt_uuid *uuid, *tmp;
2164
2165        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2166                list_del(&uuid->list);
2167                kfree(uuid);
2168        }
2169}
2170
2171void hci_link_keys_clear(struct hci_dev *hdev)
2172{
2173        struct link_key *key;
2174
2175        list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2176                list_del_rcu(&key->list);
2177                kfree_rcu(key, rcu);
2178        }
2179}
2180
2181void hci_smp_ltks_clear(struct hci_dev *hdev)
2182{
2183        struct smp_ltk *k;
2184
2185        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2186                list_del_rcu(&k->list);
2187                kfree_rcu(k, rcu);
2188        }
2189}
2190
2191void hci_smp_irks_clear(struct hci_dev *hdev)
2192{
2193        struct smp_irk *k;
2194
2195        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2196                list_del_rcu(&k->list);
2197                kfree_rcu(k, rcu);
2198        }
2199}
2200
2201struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2202{
2203        struct link_key *k;
2204
2205        rcu_read_lock();
2206        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2207                if (bacmp(bdaddr, &k->bdaddr) == 0) {
2208                        rcu_read_unlock();
2209                        return k;
2210                }
2211        }
2212        rcu_read_unlock();
2213
2214        return NULL;
2215}
2216
2217static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2218                               u8 key_type, u8 old_key_type)
2219{
2220        /* Legacy key */
2221        if (key_type < 0x03)
2222                return true;
2223
2224        /* Debug keys are insecure so don't store them persistently */
2225        if (key_type == HCI_LK_DEBUG_COMBINATION)
2226                return false;
2227
2228        /* Changed combination key and there's no previous one */
2229        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2230                return false;
2231
2232        /* Security mode 3 case */
2233        if (!conn)
2234                return true;
2235
2236        /* BR/EDR key derived using SC from an LE link */
2237        if (conn->type == LE_LINK)
2238                return true;
2239
2240        /* Neither the local nor the remote side requested no-bonding */
2241        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2242                return true;
2243
2244        /* Local side had dedicated bonding as requirement */
2245        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2246                return true;
2247
2248        /* Remote side had dedicated bonding as requirement */
2249        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2250                return true;
2251
2252        /* If none of the above criteria match, then don't store the key
2253         * persistently */
2254        return false;
2255}
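
/* Worked example of the rules above (illustration only): a legacy
 * combination key (0x00) is always stored; a debug combination key
 * (0x03) is never stored; an unauthenticated combination key (0x04 or
 * 0x07) is stored only when one of the checks above matches, i.e. no
 * connection object exists, the key was derived over an LE link, or
 * at least one side asked for more than no-bonding.
 */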
2256
2257static u8 ltk_role(u8 type)
2258{
2259        if (type == SMP_LTK)
2260                return HCI_ROLE_MASTER;
2261
2262        return HCI_ROLE_SLAVE;
2263}
2264
2265struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2266                             u8 addr_type, u8 role)
2267{
2268        struct smp_ltk *k;
2269
2270        rcu_read_lock();
2271        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2272                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2273                        continue;
2274
2275                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2276                        rcu_read_unlock();
2277                        return k;
2278                }
2279        }
2280        rcu_read_unlock();
2281
2282        return NULL;
2283}
2284
2285struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2286{
2287        struct smp_irk *irk;
2288
2289        rcu_read_lock();
2290        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2291                if (!bacmp(&irk->rpa, rpa)) {
2292                        rcu_read_unlock();
2293                        return irk;
2294                }
2295        }
2296
2297        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2298                if (smp_irk_matches(hdev, irk->val, rpa)) {
2299                        bacpy(&irk->rpa, rpa);
2300                        rcu_read_unlock();
2301                        return irk;
2302                }
2303        }
2304        rcu_read_unlock();
2305
2306        return NULL;
2307}
2308
2309struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2310                                     u8 addr_type)
2311{
2312        struct smp_irk *irk;
2313
2314        /* Identity Address must be public or static random */
2315        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2316                return NULL;
2317
2318        rcu_read_lock();
2319        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2320                if (addr_type == irk->addr_type &&
2321                    bacmp(bdaddr, &irk->bdaddr) == 0) {
2322                        rcu_read_unlock();
2323                        return irk;
2324                }
2325        }
2326        rcu_read_unlock();
2327
2328        return NULL;
2329}
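
/* Illustration (not from the original source): the bit test above
 * follows the Core specification's random address sub-types, where
 * b[5] holds the most significant address byte:
 *
 *        11xxxxxx -> static random       ((b[5] & 0xc0) == 0xc0)
 *        01xxxxxx -> resolvable private
 *        00xxxxxx -> non-resolvable private
 */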
2330
2331struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2332                                  bdaddr_t *bdaddr, u8 *val, u8 type,
2333                                  u8 pin_len, bool *persistent)
2334{
2335        struct link_key *key, *old_key;
2336        u8 old_key_type;
2337
2338        old_key = hci_find_link_key(hdev, bdaddr);
2339        if (old_key) {
2340                old_key_type = old_key->type;
2341                key = old_key;
2342        } else {
2343                old_key_type = conn ? conn->key_type : 0xff;
2344                key = kzalloc(sizeof(*key), GFP_KERNEL);
2345                if (!key)
2346                        return NULL;
2347                list_add_rcu(&key->list, &hdev->link_keys);
2348        }
2349
2350        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2351
2352        /* Some buggy controller combinations generate a changed
2353         * combination key for legacy pairing even when there's no
2354         * previous key */
2355        if (type == HCI_LK_CHANGED_COMBINATION &&
2356            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2357                type = HCI_LK_COMBINATION;
2358                if (conn)
2359                        conn->key_type = type;
2360        }
2361
2362        bacpy(&key->bdaddr, bdaddr);
2363        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2364        key->pin_len = pin_len;
2365
2366        if (type == HCI_LK_CHANGED_COMBINATION)
2367                key->type = old_key_type;
2368        else
2369                key->type = type;
2370
2371        if (persistent)
2372                *persistent = hci_persistent_key(hdev, conn, type,
2373                                                 old_key_type);
2374
2375        return key;
2376}
2377
2378struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2379                            u8 addr_type, u8 type, u8 authenticated,
2380                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2381{
2382        struct smp_ltk *key, *old_key;
2383        u8 role = ltk_role(type);
2384
2385        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2386        if (old_key)
2387                key = old_key;
2388        else {
2389                key = kzalloc(sizeof(*key), GFP_KERNEL);
2390                if (!key)
2391                        return NULL;
2392                list_add_rcu(&key->list, &hdev->long_term_keys);
2393        }
2394
2395        bacpy(&key->bdaddr, bdaddr);
2396        key->bdaddr_type = addr_type;
2397        memcpy(key->val, tk, sizeof(key->val));
2398        key->authenticated = authenticated;
2399        key->ediv = ediv;
2400        key->rand = rand;
2401        key->enc_size = enc_size;
2402        key->type = type;
2403
2404        return key;
2405}
2406
2407struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2408                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
2409{
2410        struct smp_irk *irk;
2411
2412        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2413        if (!irk) {
2414                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2415                if (!irk)
2416                        return NULL;
2417
2418                bacpy(&irk->bdaddr, bdaddr);
2419                irk->addr_type = addr_type;
2420
2421                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2422        }
2423
2424        memcpy(irk->val, val, 16);
2425        bacpy(&irk->rpa, rpa);
2426
2427        return irk;
2428}
2429
2430int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2431{
2432        struct link_key *key;
2433
2434        key = hci_find_link_key(hdev, bdaddr);
2435        if (!key)
2436                return -ENOENT;
2437
2438        BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2439
2440        list_del_rcu(&key->list);
2441        kfree_rcu(key, rcu);
2442
2443        return 0;
2444}
2445
2446int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2447{
2448        struct smp_ltk *k;
2449        int removed = 0;
2450
2451        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2452                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2453                        continue;
2454
2455                BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2456
2457                list_del_rcu(&k->list);
2458                kfree_rcu(k, rcu);
2459                removed++;
2460        }
2461
2462        return removed ? 0 : -ENOENT;
2463}
2464
2465void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2466{
2467        struct smp_irk *k;
2468
2469        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2470                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2471                        continue;
2472
2473                BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2474
2475                list_del_rcu(&k->list);
2476                kfree_rcu(k, rcu);
2477        }
2478}
2479
2480bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2481{
2482        struct smp_ltk *k;
2483        struct smp_irk *irk;
2484        u8 addr_type;
2485
2486        if (type == BDADDR_BREDR) {
2487                if (hci_find_link_key(hdev, bdaddr))
2488                        return true;
2489                return false;
2490        }
2491
2492        /* Convert to HCI addr type which struct smp_ltk uses */
2493        if (type == BDADDR_LE_PUBLIC)
2494                addr_type = ADDR_LE_DEV_PUBLIC;
2495        else
2496                addr_type = ADDR_LE_DEV_RANDOM;
2497
2498        irk = hci_get_irk(hdev, bdaddr, addr_type);
2499        if (irk) {
2500                bdaddr = &irk->bdaddr;
2501                addr_type = irk->addr_type;
2502        }
2503
2504        rcu_read_lock();
2505        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2506                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2507                        rcu_read_unlock();
2508                        return true;
2509                }
2510        }
2511        rcu_read_unlock();
2512
2513        return false;
2514}
2515
2516/* HCI command timer function */
2517static void hci_cmd_timeout(struct work_struct *work)
2518{
2519        struct hci_dev *hdev = container_of(work, struct hci_dev,
2520                                            cmd_timer.work);
2521
2522        if (hdev->sent_cmd) {
2523                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2524                u16 opcode = __le16_to_cpu(sent->opcode);
2525
2526                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2527        } else {
2528                bt_dev_err(hdev, "command tx timeout");
2529        }
2530
2531        atomic_set(&hdev->cmd_cnt, 1);
2532        queue_work(hdev->workqueue, &hdev->cmd_work);
2533}
2534
2535struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2536                                          bdaddr_t *bdaddr, u8 bdaddr_type)
2537{
2538        struct oob_data *data;
2539
2540        list_for_each_entry(data, &hdev->remote_oob_data, list) {
2541                if (bacmp(bdaddr, &data->bdaddr) != 0)
2542                        continue;
2543                if (data->bdaddr_type != bdaddr_type)
2544                        continue;
2545                return data;
2546        }
2547
2548        return NULL;
2549}
2550
2551int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2552                               u8 bdaddr_type)
2553{
2554        struct oob_data *data;
2555
2556        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2557        if (!data)
2558                return -ENOENT;
2559
2560        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2561
2562        list_del(&data->list);
2563        kfree(data);
2564
2565        return 0;
2566}
2567
2568void hci_remote_oob_data_clear(struct hci_dev *hdev)
2569{
2570        struct oob_data *data, *n;
2571
2572        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2573                list_del(&data->list);
2574                kfree(data);
2575        }
2576}
2577
2578int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2579                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
2580                            u8 *hash256, u8 *rand256)
2581{
2582        struct oob_data *data;
2583
2584        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2585        if (!data) {
2586                data = kmalloc(sizeof(*data), GFP_KERNEL);
2587                if (!data)
2588                        return -ENOMEM;
2589
2590                bacpy(&data->bdaddr, bdaddr);
2591                data->bdaddr_type = bdaddr_type;
2592                list_add(&data->list, &hdev->remote_oob_data);
2593        }
2594
2595        if (hash192 && rand192) {
2596                memcpy(data->hash192, hash192, sizeof(data->hash192));
2597                memcpy(data->rand192, rand192, sizeof(data->rand192));
2598                if (hash256 && rand256)
2599                        data->present = 0x03;
2600        } else {
2601                memset(data->hash192, 0, sizeof(data->hash192));
2602                memset(data->rand192, 0, sizeof(data->rand192));
2603                if (hash256 && rand256)
2604                        data->present = 0x02;
2605                else
2606                        data->present = 0x00;
2607        }
2608
2609        if (hash256 && rand256) {
2610                memcpy(data->hash256, hash256, sizeof(data->hash256));
2611                memcpy(data->rand256, rand256, sizeof(data->rand256));
2612        } else {
2613                memset(data->hash256, 0, sizeof(data->hash256));
2614                memset(data->rand256, 0, sizeof(data->rand256));
2615                if (hash192 && rand192)
2616                        data->present = 0x01;
2617        }
2618
2619        BT_DBG("%s for %pMR", hdev->name, bdaddr);
2620
2621        return 0;
2622}
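
/* Resulting data->present encoding from the branches above
 * (illustration only):
 *
 *        neither pair valid        -> 0x00
 *        P-192 hash/rand only      -> 0x01
 *        P-256 hash/rand only      -> 0x02
 *        both P-192 and P-256      -> 0x03
 */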
2623
2624/* This function requires the caller holds hdev->lock */
2625struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2626{
2627        struct adv_info *adv_instance;
2628
2629        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2630                if (adv_instance->instance == instance)
2631                        return adv_instance;
2632        }
2633
2634        return NULL;
2635}
2636
2637/* This function requires the caller holds hdev->lock */
2638struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2639{
2640        struct adv_info *cur_instance;
2641
2642        cur_instance = hci_find_adv_instance(hdev, instance);
2643        if (!cur_instance)
2644                return NULL;
2645
2646        if (cur_instance == list_last_entry(&hdev->adv_instances,
2647                                            struct adv_info, list))
2648                return list_first_entry(&hdev->adv_instances,
2649                                        struct adv_info, list);
2650        else
2651                return list_next_entry(cur_instance, list);
2652}
2653
2654/* This function requires the caller holds hdev->lock */
2655int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2656{
2657        struct adv_info *adv_instance;
2658
2659        adv_instance = hci_find_adv_instance(hdev, instance);
2660        if (!adv_instance)
2661                return -ENOENT;
2662
2663        BT_DBG("%s removing instance %d", hdev->name, instance);
2664
2665        if (hdev->cur_adv_instance == instance) {
2666                if (hdev->adv_instance_timeout) {
2667                        cancel_delayed_work(&hdev->adv_instance_expire);
2668                        hdev->adv_instance_timeout = 0;
2669                }
2670                hdev->cur_adv_instance = 0x00;
2671        }
2672
2673        list_del(&adv_instance->list);
2674        kfree(adv_instance);
2675
2676        hdev->adv_instance_cnt--;
2677
2678        return 0;
2679}
2680
2681/* This function requires the caller holds hdev->lock */
2682void hci_adv_instances_clear(struct hci_dev *hdev)
2683{
2684        struct adv_info *adv_instance, *n;
2685
2686        if (hdev->adv_instance_timeout) {
2687                cancel_delayed_work(&hdev->adv_instance_expire);
2688                hdev->adv_instance_timeout = 0;
2689        }
2690
2691        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2692                list_del(&adv_instance->list);
2693                kfree(adv_instance);
2694        }
2695
2696        hdev->adv_instance_cnt = 0;
2697        hdev->cur_adv_instance = 0x00;
2698}
2699
2700/* This function requires the caller holds hdev->lock */
2701int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702                         u16 adv_data_len, u8 *adv_data,
2703                         u16 scan_rsp_len, u8 *scan_rsp_data,
2704                         u16 timeout, u16 duration)
2705{
2706        struct adv_info *adv_instance;
2707
2708        adv_instance = hci_find_adv_instance(hdev, instance);
2709        if (adv_instance) {
2710                memset(adv_instance->adv_data, 0,
2711                       sizeof(adv_instance->adv_data));
2712                memset(adv_instance->scan_rsp_data, 0,
2713                       sizeof(adv_instance->scan_rsp_data));
2714        } else {
2715                if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716                    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2717                        return -EOVERFLOW;
2718
2719                adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2720                if (!adv_instance)
2721                        return -ENOMEM;
2722
2723                adv_instance->pending = true;
2724                adv_instance->instance = instance;
2725                list_add(&adv_instance->list, &hdev->adv_instances);
2726                hdev->adv_instance_cnt++;
2727        }
2728
2729        adv_instance->flags = flags;
2730        adv_instance->adv_data_len = adv_data_len;
2731        adv_instance->scan_rsp_len = scan_rsp_len;
2732
2733        if (adv_data_len)
2734                memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2735
2736        if (scan_rsp_len)
2737                memcpy(adv_instance->scan_rsp_data,
2738                       scan_rsp_data, scan_rsp_len);
2739
2740        adv_instance->timeout = timeout;
2741        adv_instance->remaining_time = timeout;
2742
2743        if (duration == 0)
2744                adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2745        else
2746                adv_instance->duration = duration;
2747
2748        BT_DBG("%s for instance %d", hdev->name, instance);
2749
2750        return 0;
2751}
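
/* Illustrative sketch (condensed from the mgmt Add Advertising
 * handler, not part of this file): adv_data and scan_rsp_data arrive
 * back to back in one buffer, so a caller typically does:
 *
 *        err = hci_add_adv_instance(hdev, cp->instance, flags,
 *                                   cp->adv_data_len, cp->data,
 *                                   cp->scan_rsp_len,
 *                                   cp->data + cp->adv_data_len,
 *                                   timeout, duration);
 */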
2752
2753struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2754                                         bdaddr_t *bdaddr, u8 type)
2755{
2756        struct bdaddr_list *b;
2757
2758        list_for_each_entry(b, bdaddr_list, list) {
2759                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2760                        return b;
2761        }
2762
2763        return NULL;
2764}
2765
2766void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2767{
2768        struct bdaddr_list *b, *n;
2769
2770        list_for_each_entry_safe(b, n, bdaddr_list, list) {
2771                list_del(&b->list);
2772                kfree(b);
2773        }
2774}
2775
2776int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2777{
2778        struct bdaddr_list *entry;
2779
2780        if (!bacmp(bdaddr, BDADDR_ANY))
2781                return -EBADF;
2782
2783        if (hci_bdaddr_list_lookup(list, bdaddr, type))
2784                return -EEXIST;
2785
2786        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2787        if (!entry)
2788                return -ENOMEM;
2789
2790        bacpy(&entry->bdaddr, bdaddr);
2791        entry->bdaddr_type = type;
2792
2793        list_add(&entry->list, list);
2794
2795        return 0;
2796}
2797
2798int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2799{
2800        struct bdaddr_list *entry;
2801
2802        if (!bacmp(bdaddr, BDADDR_ANY)) {
2803                hci_bdaddr_list_clear(list);
2804                return 0;
2805        }
2806
2807        entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2808        if (!entry)
2809                return -ENOENT;
2810
2811        list_del(&entry->list);
2812        kfree(entry);
2813
2814        return 0;
2815}
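
/* Illustrative sketch (not part of this file) of the add/del pairing,
 * e.g. for the LE white list; "peer" is a hypothetical address and
 * error handling is trimmed:
 *
 *        hci_dev_lock(hdev);
 *        hci_bdaddr_list_add(&hdev->le_white_list, &peer,
 *                            ADDR_LE_DEV_PUBLIC);
 *        ...
 *        hci_bdaddr_list_del(&hdev->le_white_list, &peer,
 *                            ADDR_LE_DEV_PUBLIC);
 *        hci_dev_unlock(hdev);
 */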
2816
2817/* This function requires the caller holds hdev->lock */
2818struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2819                                               bdaddr_t *addr, u8 addr_type)
2820{
2821        struct hci_conn_params *params;
2822
2823        list_for_each_entry(params, &hdev->le_conn_params, list) {
2824                if (bacmp(&params->addr, addr) == 0 &&
2825                    params->addr_type == addr_type) {
2826                        return params;
2827                }
2828        }
2829
2830        return NULL;
2831}
2832
2833/* This function requires the caller holds hdev->lock */
2834struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2835                                                  bdaddr_t *addr, u8 addr_type)
2836{
2837        struct hci_conn_params *param;
2838
2839        list_for_each_entry(param, list, action) {
2840                if (bacmp(&param->addr, addr) == 0 &&
2841                    param->addr_type == addr_type)
2842                        return param;
2843        }
2844
2845        return NULL;
2846}
2847
2848/* This function requires the caller holds hdev->lock */
2849struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2850                                            bdaddr_t *addr, u8 addr_type)
2851{
2852        struct hci_conn_params *params;
2853
2854        params = hci_conn_params_lookup(hdev, addr, addr_type);
2855        if (params)
2856                return params;
2857
2858        params = kzalloc(sizeof(*params), GFP_KERNEL);
2859        if (!params) {
2860                bt_dev_err(hdev, "out of memory");
2861                return NULL;
2862        }
2863
2864        bacpy(&params->addr, addr);
2865        params->addr_type = addr_type;
2866
2867        list_add(&params->list, &hdev->le_conn_params);
2868        INIT_LIST_HEAD(&params->action);
2869
2870        params->conn_min_interval = hdev->le_conn_min_interval;
2871        params->conn_max_interval = hdev->le_conn_max_interval;
2872        params->conn_latency = hdev->le_conn_latency;
2873        params->supervision_timeout = hdev->le_supv_timeout;
2874        params->auto_connect = HCI_AUTO_CONN_DISABLED;
2875
2876        BT_DBG("addr %pMR (type %u)", addr, addr_type);
2877
2878        return params;
2879}
2880
2881static void hci_conn_params_free(struct hci_conn_params *params)
2882{
2883        if (params->conn) {
2884                hci_conn_drop(params->conn);
2885                hci_conn_put(params->conn);
2886        }
2887
2888        list_del(&params->action);
2889        list_del(&params->list);
2890        kfree(params);
2891}
2892
2893/* This function requires the caller holds hdev->lock */
2894void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2895{
2896        struct hci_conn_params *params;
2897
2898        params = hci_conn_params_lookup(hdev, addr, addr_type);
2899        if (!params)
2900                return;
2901
2902        hci_conn_params_free(params);
2903
2904        hci_update_background_scan(hdev);
2905
2906        BT_DBG("addr %pMR (type %u)", addr, addr_type);
2907}
2908
2909/* This function requires the caller holds hdev->lock */
2910void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2911{
2912        struct hci_conn_params *params, *tmp;
2913
2914        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2915                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2916                        continue;
2917
2918                /* If trying to establish a one-time connection to a disabled
2919                 * device, leave the params, but mark them as just once.
2920                 */
2921                if (params->explicit_connect) {
2922                        params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2923                        continue;
2924                }
2925
2926                list_del(&params->list);
2927                kfree(params);
2928        }
2929
2930        BT_DBG("All LE disabled connection parameters were removed");
2931}
2932
2933/* This function requires the caller holds hdev->lock */
2934static void hci_conn_params_clear_all(struct hci_dev *hdev)
2935{
2936        struct hci_conn_params *params, *tmp;
2937
2938        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2939                hci_conn_params_free(params);
2940
2941        BT_DBG("All LE connection parameters were removed");
2942}
2943
2944/* Copy the Identity Address of the controller.
2945 *
2946 * If the controller has a public BD_ADDR, then by default use that one.
2947 * If this is an LE-only controller without a public address, default to
2948 * the static random address.
2949 *
2950 * For debugging purposes it is possible to force controllers with a
2951 * public address to use the static random address instead.
2952 *
2953 * In case BR/EDR has been disabled on a dual-mode controller and
2954 * userspace has configured a static address, then that address
2955 * becomes the identity address instead of the public BR/EDR address.
2956 */
2957void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2958                               u8 *bdaddr_type)
2959{
2960        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2961            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2962            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2963             bacmp(&hdev->static_addr, BDADDR_ANY))) {
2964                bacpy(bdaddr, &hdev->static_addr);
2965                *bdaddr_type = ADDR_LE_DEV_RANDOM;
2966        } else {
2967                bacpy(bdaddr, &hdev->bdaddr);
2968                *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2969        }
2970}
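
/* Illustrative sketch (not from this file): request code uses this
 * helper to pick the own-address for LE operations:
 *
 *        bdaddr_t id_addr;
 *        u8 id_addr_type;
 *
 *        hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *        (then program id_addr/id_addr_type into the controller)
 */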
2971
2972/* Alloc HCI device */
2973struct hci_dev *hci_alloc_dev(void)
2974{
2975        struct hci_dev *hdev;
2976
2977        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2978        if (!hdev)
2979                return NULL;
2980
2981        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2982        hdev->esco_type = (ESCO_HV1);
2983        hdev->link_mode = (HCI_LM_ACCEPT);
2984        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2985        hdev->io_capability = 0x03;     /* No Input No Output */
2986        hdev->manufacturer = 0xffff;    /* Default to internal use */
2987        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2988        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2989        hdev->adv_instance_cnt = 0;
2990        hdev->cur_adv_instance = 0x00;
2991        hdev->adv_instance_timeout = 0;
2992
2993        hdev->sniff_max_interval = 800;
2994        hdev->sniff_min_interval = 80;
2995
2996        hdev->le_adv_channel_map = 0x07;
2997        hdev->le_adv_min_interval = 0x0800;
2998        hdev->le_adv_max_interval = 0x0800;
2999        hdev->le_scan_interval = 0x0060;
3000        hdev->le_scan_window = 0x0030;
3001        hdev->le_conn_min_interval = 0x0018;
3002        hdev->le_conn_max_interval = 0x0028;
3003        hdev->le_conn_latency = 0x0000;
3004        hdev->le_supv_timeout = 0x002a;
3005        hdev->le_def_tx_len = 0x001b;
3006        hdev->le_def_tx_time = 0x0148;
3007        hdev->le_max_tx_len = 0x001b;
3008        hdev->le_max_tx_time = 0x0148;
3009        hdev->le_max_rx_len = 0x001b;
3010        hdev->le_max_rx_time = 0x0148;
3011
3012        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3013        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3014        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3015        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3016
3017        mutex_init(&hdev->lock);
3018        mutex_init(&hdev->req_lock);
3019
3020        INIT_LIST_HEAD(&hdev->mgmt_pending);
3021        INIT_LIST_HEAD(&hdev->blacklist);
3022        INIT_LIST_HEAD(&hdev->whitelist);
3023        INIT_LIST_HEAD(&hdev->uuids);
3024        INIT_LIST_HEAD(&hdev->link_keys);
3025        INIT_LIST_HEAD(&hdev->long_term_keys);
3026        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3027        INIT_LIST_HEAD(&hdev->remote_oob_data);
3028        INIT_LIST_HEAD(&hdev->le_white_list);
3029        INIT_LIST_HEAD(&hdev->le_conn_params);
3030        INIT_LIST_HEAD(&hdev->pend_le_conns);
3031        INIT_LIST_HEAD(&hdev->pend_le_reports);
3032        INIT_LIST_HEAD(&hdev->conn_hash.list);
3033        INIT_LIST_HEAD(&hdev->adv_instances);
3034
3035        INIT_WORK(&hdev->rx_work, hci_rx_work);
3036        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3037        INIT_WORK(&hdev->tx_work, hci_tx_work);
3038        INIT_WORK(&hdev->power_on, hci_power_on);
3039        INIT_WORK(&hdev->error_reset, hci_error_reset);
3040
3041        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3042
3043        skb_queue_head_init(&hdev->rx_q);
3044        skb_queue_head_init(&hdev->cmd_q);
3045        skb_queue_head_init(&hdev->raw_q);
3046
3047        init_waitqueue_head(&hdev->req_wait_q);
3048
3049        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3050
3051        hci_request_setup(hdev);
3052
3053        hci_init_sysfs(hdev);
3054        discovery_init(hdev);
3055
3056        return hdev;
3057}
3058EXPORT_SYMBOL(hci_alloc_dev);
3059
3060/* Free HCI device */
3061void hci_free_dev(struct hci_dev *hdev)
3062{
3063        /* will free via device release */
3064        put_device(&hdev->dev);
3065}
3066EXPORT_SYMBOL(hci_free_dev);
3067
3068/* Register HCI device */
3069int hci_register_dev(struct hci_dev *hdev)
3070{
3071        int id, error;
3072
3073        if (!hdev->open || !hdev->close || !hdev->send)
3074                return -EINVAL;
3075
3076        /* Do not allow HCI_AMP devices to register at index 0,
3077         * so the index can be used as the AMP controller ID.
3078         */
3079        switch (hdev->dev_type) {
3080        case HCI_PRIMARY:
3081                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3082                break;
3083        case HCI_AMP:
3084                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3085                break;
3086        default:
3087                return -EINVAL;
3088        }
3089
3090        if (id < 0)
3091                return id;
3092
3093        sprintf(hdev->name, "hci%d", id);
3094        hdev->id = id;
3095
3096        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3097
3098        hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3099        if (!hdev->workqueue) {
3100                error = -ENOMEM;
3101                goto err;
3102        }
3103
3104        hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3105                                                      hdev->name);
3106        if (!hdev->req_workqueue) {
3107                destroy_workqueue(hdev->workqueue);
3108                error = -ENOMEM;
3109                goto err;
3110        }
3111
3112        if (!IS_ERR_OR_NULL(bt_debugfs))
3113                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3114
3115        dev_set_name(&hdev->dev, "%s", hdev->name);
3116
3117        error = device_add(&hdev->dev);
3118        if (error < 0)
3119                goto err_wqueue;
3120
3121        hci_leds_init(hdev);
3122
3123        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3124                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3125                                    hdev);
3126        if (hdev->rfkill) {
3127                if (rfkill_register(hdev->rfkill) < 0) {
3128                        rfkill_destroy(hdev->rfkill);
3129                        hdev->rfkill = NULL;
3130                }
3131        }
3132
3133        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3134                hci_dev_set_flag(hdev, HCI_RFKILLED);
3135
3136        hci_dev_set_flag(hdev, HCI_SETUP);
3137        hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3138
3139        if (hdev->dev_type == HCI_PRIMARY) {
3140                /* Assume BR/EDR support until proven otherwise (such as
3141                 * through reading supported features during init).
3142                 */
3143                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3144        }
3145
3146        write_lock(&hci_dev_list_lock);
3147        list_add(&hdev->list, &hci_dev_list);
3148        write_unlock(&hci_dev_list_lock);
3149
3150        /* Devices that are marked for raw-only usage are unconfigured
3151         * and should not be included in normal operation.
3152         */
3153        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3154                hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3155
3156        hci_sock_dev_event(hdev, HCI_DEV_REG);
3157        hci_dev_hold(hdev);
3158
3159        queue_work(hdev->req_workqueue, &hdev->power_on);
3160
3161        return id;
3162
3163err_wqueue:
3164        destroy_workqueue(hdev->workqueue);
3165        destroy_workqueue(hdev->req_workqueue);
3166err:
3167        ida_simple_remove(&hci_index_ida, hdev->id);
3168
3169        return error;
3170}
3171EXPORT_SYMBOL(hci_register_dev);
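
/* Usage sketch (illustrative only, not part of this file): a transport
 * driver typically pairs hci_alloc_dev() with hci_register_dev() in its
 * probe path.  The foo_* names below are hypothetical driver callbacks
 * and state, in the style of real drivers such as btusb:
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = foo_open;         // open, close and send are mandatory
 *      hdev->close = foo_close;
 *      hdev->send  = foo_send;
 *      hci_set_drvdata(hdev, foo);
 *
 *      err = hci_register_dev(hdev);   // returns the new hciN index
 *      if (err < 0) {
 *              hci_free_dev(hdev);
 *              return err;
 *      }
 */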
3172
3173/* Unregister HCI device */
3174void hci_unregister_dev(struct hci_dev *hdev)
3175{
3176        int id;
3177
3178        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3179
3180        hci_dev_set_flag(hdev, HCI_UNREGISTER);
3181
3182        id = hdev->id;
3183
3184        write_lock(&hci_dev_list_lock);
3185        list_del(&hdev->list);
3186        write_unlock(&hci_dev_list_lock);
3187
3188        cancel_work_sync(&hdev->power_on);
3189
3190        hci_dev_do_close(hdev);
3191
3192        if (!test_bit(HCI_INIT, &hdev->flags) &&
3193            !hci_dev_test_flag(hdev, HCI_SETUP) &&
3194            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3195                hci_dev_lock(hdev);
3196                mgmt_index_removed(hdev);
3197                hci_dev_unlock(hdev);
3198        }
3199
3200        /* mgmt_index_removed() should take care of emptying the
3201         * pending list. */
3202        BUG_ON(!list_empty(&hdev->mgmt_pending));
3203
3204        hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3205
3206        if (hdev->rfkill) {
3207                rfkill_unregister(hdev->rfkill);
3208                rfkill_destroy(hdev->rfkill);
3209        }
3210
3211        device_del(&hdev->dev);
3212
3213        debugfs_remove_recursive(hdev->debugfs);
3214        kfree_const(hdev->hw_info);
3215        kfree_const(hdev->fw_info);
3216
3217        destroy_workqueue(hdev->workqueue);
3218        destroy_workqueue(hdev->req_workqueue);
3219
3220        hci_dev_lock(hdev);
3221        hci_bdaddr_list_clear(&hdev->blacklist);
3222        hci_bdaddr_list_clear(&hdev->whitelist);
3223        hci_uuids_clear(hdev);
3224        hci_link_keys_clear(hdev);
3225        hci_smp_ltks_clear(hdev);
3226        hci_smp_irks_clear(hdev);
3227        hci_remote_oob_data_clear(hdev);
3228        hci_adv_instances_clear(hdev);
3229        hci_bdaddr_list_clear(&hdev->le_white_list);
3230        hci_conn_params_clear_all(hdev);
3231        hci_discovery_filter_clear(hdev);
3232        hci_dev_unlock(hdev);
3233
3234        hci_dev_put(hdev);
3235
3236        ida_simple_remove(&hci_index_ida, id);
3237}
3238EXPORT_SYMBOL(hci_unregister_dev);
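
/* Teardown sketch (illustrative only): the driver-side mirror of the
 * probe path shown after hci_register_dev() above.  Unregistering must
 * precede hci_free_dev(); the hci_dev memory itself only goes away once
 * the final reference is dropped:
 *
 *      hci_unregister_dev(hdev);
 *      hci_free_dev(hdev);
 */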
3239
3240/* Suspend HCI device */
3241int hci_suspend_dev(struct hci_dev *hdev)
3242{
3243        hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3244        return 0;
3245}
3246EXPORT_SYMBOL(hci_suspend_dev);
3247
3248/* Resume HCI device */
3249int hci_resume_dev(struct hci_dev *hdev)
3250{
3251        hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3252        return 0;
3253}
3254EXPORT_SYMBOL(hci_resume_dev);
3255
3256/* Reset HCI device */
3257int hci_reset_dev(struct hci_dev *hdev)
3258{
3259        const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3260        struct sk_buff *skb;
3261
3262        skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
3263        if (!skb)
3264                return -ENOMEM;
3265
3266        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3267        skb_put_data(skb, hw_err, sizeof(hw_err));
3268
3269        /* Send Hardware Error to upper stack */
3270        return hci_recv_frame(hdev, skb);
3271}
3272EXPORT_SYMBOL(hci_reset_dev);
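
/* The frame synthesized by hci_reset_dev() is a complete HCI event
 * packet:
 *
 *      0x10    HCI_EV_HARDWARE_ERROR (event code)
 *      0x01    parameter total length
 *      0x00    hardware code
 *
 * Injecting it via hci_recv_frame() makes the core react exactly as if
 * the controller itself had reported a hardware error.
 */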
3273
3274/* Receive frame from HCI drivers */
3275int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3276{
3277        if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3278                      !test_bit(HCI_INIT, &hdev->flags))) {
3279                kfree_skb(skb);
3280                return -ENXIO;
3281        }
3282
3283        if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3284            hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3285            hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3286                kfree_skb(skb);
3287                return -EINVAL;
3288        }
3289
3290        /* Incoming skb */
3291        bt_cb(skb)->incoming = 1;
3292
3293        /* Time stamp */
3294        __net_timestamp(skb);
3295
3296        skb_queue_tail(&hdev->rx_q, skb);
3297        queue_work(hdev->workqueue, &hdev->rx_work);
3298
3299        return 0;
3300}
3301EXPORT_SYMBOL(hci_recv_frame);
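
/* Usage sketch (illustrative only): a driver hands received data to the
 * core by tagging the skb with its packet type first.  The buf and len
 * variables stand in for hypothetical driver receive state:
 *
 *      skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *
 *      hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
 *      skb_put_data(skb, buf, len);
 *
 *      return hci_recv_frame(hdev, skb);
 */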
3302
3303/* Receive diagnostic message from HCI drivers */
3304int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3305{
3306        /* Mark as diagnostic packet */
3307        hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3308
3309        /* Time stamp */
3310        __net_timestamp(skb);
3311
3312        skb_queue_tail(&hdev->rx_q, skb);
3313        queue_work(hdev->workqueue, &hdev->rx_work);
3314
3315        return 0;
3316}
3317EXPORT_SYMBOL(hci_recv_diag);
3318
3319void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3320{
3321        va_list vargs;
3322
3323        va_start(vargs, fmt);
3324        kfree_const(hdev->hw_info);
3325        hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3326        va_end(vargs);
3327}
3328EXPORT_SYMBOL(hci_set_hw_info);
3329
3330void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3331{
3332        va_list vargs;
3333
3334        va_start(vargs, fmt);
3335        kfree_const(hdev->fw_info);
3336        hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3337        va_end(vargs);
3338}
3339EXPORT_SYMBOL(hci_set_fw_info);
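
/* Usage sketch (illustrative only): drivers typically record these
 * strings once the controller has been identified, e.g. from a vendor
 * version response (rev and build are hypothetical values):
 *
 *      hci_set_hw_info(hdev, "rev %u", rev);
 *      hci_set_fw_info(hdev, "build %u", build);
 *
 * kvasprintf_const() can return a direct .rodata reference when the
 * format string needs no expansion, which is why the strings must be
 * released with kfree_const() rather than plain kfree().
 */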
3340
3341/* ---- Interface to upper protocols ---- */
3342
3343int hci_register_cb(struct hci_cb *cb)
3344{
3345        BT_DBG("%p name %s", cb, cb->name);
3346
3347        mutex_lock(&hci_cb_list_lock);
3348        list_add_tail(&cb->list, &hci_cb_list);
3349        mutex_unlock(&hci_cb_list_lock);
3350
3351        return 0;
3352}
3353EXPORT_SYMBOL(hci_register_cb);
3354
3355int hci_unregister_cb(struct hci_cb *cb)
3356{
3357        BT_DBG("%p name %s", cb, cb->name);
3358
3359        mutex_lock(&hci_cb_list_lock);
3360        list_del(&cb->list);
3361        mutex_unlock(&hci_cb_list_lock);
3362
3363        return 0;
3364}
3365EXPORT_SYMBOL(hci_unregister_cb);
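
/* Usage sketch (illustrative only): upper protocols hook into
 * connection events with a statically defined callback block, as L2CAP
 * and SMP do.  The foo_* callbacks are hypothetical; only the hooks of
 * interest need to be filled in:
 *
 *      static struct hci_cb foo_cb = {
 *              .name           = "foo",
 *              .connect_cfm    = foo_connect_cfm,
 *              .disconn_cfm    = foo_disconn_cfm,
 *      };
 *
 *      hci_register_cb(&foo_cb);
 *      ...
 *      hci_unregister_cb(&foo_cb);
 */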
3366
3367static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3368{
3369        int err;
3370
3371        BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3372               skb->len);
3373
3374        /* Time stamp */
3375        __net_timestamp(skb);
3376
3377        /* Send copy to monitor */
3378        hci_send_to_monitor(hdev, skb);
3379
3380        if (atomic_read(&hdev->promisc)) {
3381                /* Send copy to the sockets */
3382                hci_send_to_sock(hdev, skb);
3383        }
3384
3385        /* Get rid of the skb owner before handing the frame to the driver. */
3386        skb_orphan(skb);
3387
3388        if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3389                kfree_skb(skb);
3390                return;
3391        }
3392
3393        err = hdev->send(hdev, skb);
3394        if (err < 0) {
3395                bt_dev_err(hdev, "sending frame failed (%d)", err);
3396                kfree_skb(skb);
3397        }
3398}
3399
3400/* Send HCI command */
3401int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3402                 const void *param)
3403{
3404        struct sk_buff *skb;
3405
3406        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3407
3408        skb = hci_prepare_cmd(hdev, opcode, plen, param);
3409        if (!skb) {
3410                bt_dev_err(hdev, "no memory for command");
3411                return -ENOMEM;
3412        }
3413
3414        /* Stand-alone HCI commands must be flagged as
3415         * single-command requests.
3416         */
3417        bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3418
3419        skb_queue_tail(&hdev->cmd_q, skb);
3420        queue_work(hdev->workqueue, &hdev->cmd_work);
3421
3422        return 0;
3423}
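
/* Usage sketch (illustrative only): queueing a fire-and-forget command,
 * here Write Scan Enable with both page and inquiry scan turned on.
 * The result arrives asynchronously through the event path:
 *
 *      u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */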
3424
3425/* Get data from the previously sent command */
3426void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3427{
3428        struct hci_command_hdr *hdr;
3429
3430        if (!hdev->sent_cmd)
3431                return NULL;
3432
3433        hdr = (void *) hdev->sent_cmd->data;
3434
3435        if (hdr->opcode != cpu_to_le16(opcode))
3436                return NULL;
3437
3438        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3439
3440        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3441}
3442
3443/* Send HCI command and wait for the Command Complete event */
3444struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3445                             const void *param, u32 timeout)
3446{
3447        struct sk_buff *skb;
3448
3449        if (!test_bit(HCI_UP, &hdev->flags))
3450                return ERR_PTR(-ENETDOWN);
3451
3452        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3453
3454        hci_req_sync_lock(hdev);
3455        skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3456        hci_req_sync_unlock(hdev);
3457
3458        return skb;
3459}
3460EXPORT_SYMBOL(hci_cmd_sync);
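
/* Usage sketch (illustrative only): the synchronous variant blocks and
 * returns the Command Complete parameters as an skb, or an ERR_PTR()
 * on failure:
 *
 *      skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                         HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      rp = (struct hci_rp_read_local_version *)skb->data;
 *      ...
 *      kfree_skb(skb);
 */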
3461
3462/* Send ACL data */
3463static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3464{
3465        struct hci_acl_hdr *hdr;
3466        int len = skb->len;
3467
3468        skb_push(skb, HCI_ACL_HDR_SIZE);
3469        skb_reset_transport_header(skb);
3470        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3471        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3472        hdr->dlen   = cpu_to_le16(len);
3473}
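
/* hci_handle_pack() above folds the 12-bit connection handle and the
 * 4-bit packet boundary/broadcast flags into one 16-bit field:
 *
 *      hci_handle_pack(h, f) == (h & 0x0fff) | (f << 12)
 *
 * e.g. handle 0x002a with ACL_START (0x02) becomes 0x202a, serialized
 * little endian as the bytes 2a 20.
 */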
3474
3475static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3476                          struct sk_buff *skb, __u16 flags)
3477{
3478        struct hci_conn *conn = chan->conn;
3479        struct hci_dev *hdev = conn->hdev;
3480        struct sk_buff *list;
3481
3482        skb->len = skb_headlen(skb);
3483        skb->data_len = 0;
3484
3485        hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3486
3487        switch (hdev->dev_type) {
3488        case HCI_PRIMARY:
3489                hci_add_acl_hdr(skb, conn->handle, flags);
3490                break;
3491        case HCI_AMP:
3492                hci_add_acl_hdr(skb, chan->handle, flags);
3493                break;
3494        default:
3495                bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3496                return;
3497        }
3498
3499        list = skb_shinfo(skb)->frag_list;
3500        if (!list) {
3501                /* Non-fragmented */
3502                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3503
3504                skb_queue_tail(queue, skb);
3505        } else {
3506                /* Fragmented */
3507                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3508
3509                skb_shinfo(skb)->frag_list = NULL;
3510
3511                /* Queue all fragments atomically. We need to use spin_lock_bh
3512                 * here because of 6LoWPAN links, as there this function is
3513                 * called from softirq and using normal spin lock could cause
3514                 * deadlocks.
3515                 */
3516                spin_lock_bh(&queue->lock);
3517
3518                __skb_queue_tail(queue, skb);
3519
3520                flags &= ~ACL_START;
3521                flags |= ACL_CONT;
3522                do {
3523                        skb = list; list = list->next;
3524
3525                        hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3526                        hci_add_acl_hdr(skb, conn->handle, flags);
3527
3528                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3529
3530                        __skb_queue_tail(queue, skb);
3531                } while (list);
3532
3533                spin_unlock_bh(&queue->lock);
3534        }
3535}
3536
3537void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3538{
3539        struct hci_dev *hdev = chan->conn->hdev;
3540
3541        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3542
3543        hci_queue_acl(chan, &chan->data_q, skb, flags);
3544
3545        queue_work(hdev->workqueue, &hdev->tx_work);
3546}
3547
3548/* Send SCO data */
3549void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3550{
3551        struct hci_dev *hdev = conn->hdev;
3552        struct hci_sco_hdr hdr;
3553
3554        BT_DBG("%s len %d", hdev->name, skb->len);
3555
3556        hdr.handle = cpu_to_le16(conn->handle);
3557        hdr.dlen   = skb->len;
3558
3559        skb_push(skb, HCI_SCO_HDR_SIZE);
3560        skb_reset_transport_header(skb);
3561        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3562
3563        hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3564
3565        skb_queue_tail(&conn->data_q, skb);
3566        queue_work(hdev->workqueue, &hdev->tx_work);
3567}
3568
3569/* ---- HCI TX task (outgoing data) ---- */
3570
3571/* HCI Connection scheduler */
3572static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3573                                     int *quote)
3574{
3575        struct hci_conn_hash *h = &hdev->conn_hash;
3576        struct hci_conn *conn = NULL, *c;
3577        unsigned int num = 0, min = ~0;
3578
3579        /* We don't have to lock the device here. Connections are always
3580         * added and removed with the TX task disabled. */
3581
3582        rcu_read_lock();
3583
3584        list_for_each_entry_rcu(c, &h->list, list) {
3585                if (c->type != type || skb_queue_empty(&c->data_q))
3586                        continue;
3587
3588                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3589                        continue;
3590
3591                num++;
3592
3593                if (c->sent < min) {
3594                        min  = c->sent;
3595                        conn = c;
3596                }
3597
3598                if (hci_conn_num(hdev, type) == num)
3599                        break;
3600        }
3601
3602        rcu_read_unlock();
3603
3604        if (conn) {
3605                int cnt, q;
3606
3607                switch (conn->type) {
3608                case ACL_LINK:
3609                        cnt = hdev->acl_cnt;
3610                        break;
3611                case SCO_LINK:
3612                case ESCO_LINK:
3613                        cnt = hdev->sco_cnt;
3614                        break;
3615                case LE_LINK:
3616                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3617                        break;
3618                default:
3619                        cnt = 0;
3620                        bt_dev_err(hdev, "unknown link type %d", conn->type);
3621                }
3622
3623                q = cnt / num;
3624                *quote = q ? q : 1;
3625        } else
3626                *quote = 0;
3627
3628        BT_DBG("conn %p quote %d", conn, *quote);
3629        return conn;
3630}
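
/* Quota arithmetic (worked example): with 9 free ACL packet slots
 * (hdev->acl_cnt == 9) and num == 4 eligible connections, the selected
 * connection may send q = 9 / 4 = 2 packets this round; a quotient of
 * 0 is rounded up to 1 so the least-busy link always makes progress.
 */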
3631
3632static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3633{
3634        struct hci_conn_hash *h = &hdev->conn_hash;
3635        struct hci_conn *c;
3636
3637        bt_dev_err(hdev, "link tx timeout");
3638
3639        rcu_read_lock();
3640
3641        /* Kill stalled connections */
3642        list_for_each_entry_rcu(c, &h->list, list) {
3643                if (c->type == type && c->sent) {
3644                        bt_dev_err(hdev, "killing stalled connection %pMR",
3645                                   &c->dst);
3646                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3647                }
3648        }
3649
3650        rcu_read_unlock();
3651}
3652
3653static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3654                                      int *quote)
3655{
3656        struct hci_conn_hash *h = &hdev->conn_hash;
3657        struct hci_chan *chan = NULL;
3658        unsigned int num = 0, min = ~0, cur_prio = 0;
3659        struct hci_conn *conn;
3660        int cnt, q, conn_num = 0;
3661
3662        BT_DBG("%s", hdev->name);
3663
3664        rcu_read_lock();
3665
3666        list_for_each_entry_rcu(conn, &h->list, list) {
3667                struct hci_chan *tmp;
3668
3669                if (conn->type != type)
3670                        continue;
3671
3672                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3673                        continue;
3674
3675                conn_num++;
3676
3677                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3678                        struct sk_buff *skb;
3679
3680                        if (skb_queue_empty(&tmp->data_q))
3681                                continue;
3682
3683                        skb = skb_peek(&tmp->data_q);
3684                        if (skb->priority < cur_prio)
3685                                continue;
3686
3687                        if (skb->priority > cur_prio) {
3688                                num = 0;
3689                                min = ~0;
3690                                cur_prio = skb->priority;
3691                        }
3692
3693                        num++;
3694
3695                        if (conn->sent < min) {
3696                                min  = conn->sent;
3697                                chan = tmp;
3698                        }
3699                }
3700
3701                if (hci_conn_num(hdev, type) == conn_num)
3702                        break;
3703        }
3704
3705        rcu_read_unlock();
3706
3707        if (!chan)
3708                return NULL;
3709
3710        switch (chan->conn->type) {
3711        case ACL_LINK:
3712                cnt = hdev->acl_cnt;
3713                break;
3714        case AMP_LINK:
3715                cnt = hdev->block_cnt;
3716                break;
3717        case SCO_LINK:
3718        case ESCO_LINK:
3719                cnt = hdev->sco_cnt;
3720                break;
3721        case LE_LINK:
3722                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3723                break;
3724        default:
3725                cnt = 0;
3726                bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3727        }
3728
3729        q = cnt / num;
3730        *quote = q ? q : 1;
3731        BT_DBG("chan %p quote %d", chan, *quote);
3732        return chan;
3733}
3734
3735static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3736{
3737        struct hci_conn_hash *h = &hdev->conn_hash;
3738        struct hci_conn *conn;
3739        int num = 0;
3740
3741        BT_DBG("%s", hdev->name);
3742
3743        rcu_read_lock();
3744
3745        list_for_each_entry_rcu(conn, &h->list, list) {
3746                struct hci_chan *chan;
3747
3748                if (conn->type != type)
3749                        continue;
3750
3751                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3752                        continue;
3753
3754                num++;
3755
3756                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3757                        struct sk_buff *skb;
3758
3759                        if (chan->sent) {
3760                                chan->sent = 0;
3761                                continue;
3762                        }
3763
3764                        if (skb_queue_empty(&chan->data_q))
3765                                continue;
3766
3767                        skb = skb_peek(&chan->data_q);
3768                        if (skb->priority >= HCI_PRIO_MAX - 1)
3769                                continue;
3770
3771                        skb->priority = HCI_PRIO_MAX - 1;
3772
3773                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3774                               skb->priority);
3775                }
3776
3777                if (hci_conn_num(hdev, type) == num)
3778                        break;
3779        }
3780
3781        rcu_read_unlock();
3783}
3784
3785static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3786{
3787        /* Calculate count of blocks used by this packet */
3788        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3789}
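
/* Worked example (hypothetical numbers): with hdev->block_len == 339,
 * an skb of 1025 bytes carries 1021 bytes past the 4 byte ACL header
 * and therefore occupies DIV_ROUND_UP(1021, 339) == 4 data blocks.
 */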
3790
3791static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3792{
3793        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3794                /* ACL tx timeout must be longer than maximum
3795                 * link supervision timeout (40.9 seconds) */
3796                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3797                                       HCI_ACL_TX_TIMEOUT))
3798                        hci_link_tx_to(hdev, ACL_LINK);
3799        }
3800}
3801
3802static void hci_sched_acl_pkt(struct hci_dev *hdev)
3803{
3804        unsigned int cnt = hdev->acl_cnt;
3805        struct hci_chan *chan;
3806        struct sk_buff *skb;
3807        int quote;
3808
3809        __check_timeout(hdev, cnt);
3810
3811        while (hdev->acl_cnt &&
3812               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3813                u32 priority = (skb_peek(&chan->data_q))->priority;
3814                while (quote-- && (skb = skb_peek(&chan->data_q))) {
3815                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3816                               skb->len, skb->priority);
3817
3818                        /* Stop if priority has changed */
3819                        if (skb->priority < priority)
3820                                break;
3821
3822                        skb = skb_dequeue(&chan->data_q);
3823
3824                        hci_conn_enter_active_mode(chan->conn,
3825                                                   bt_cb(skb)->force_active);
3826
3827                        hci_send_frame(hdev, skb);
3828                        hdev->acl_last_tx = jiffies;
3829
3830                        hdev->acl_cnt--;
3831                        chan->sent++;
3832                        chan->conn->sent++;
3833                }
3834        }
3835
3836        if (cnt != hdev->acl_cnt)
3837                hci_prio_recalculate(hdev, ACL_LINK);
3838}
3839
3840static void hci_sched_acl_blk(struct hci_dev *hdev)
3841{
3842        unsigned int cnt = hdev->block_cnt;
3843        struct hci_chan *chan;
3844        struct sk_buff *skb;
3845        int quote;
3846        u8 type;
3847
3848        __check_timeout(hdev, cnt);
3849
3850        BT_DBG("%s", hdev->name);
3851
3852        if (hdev->dev_type == HCI_AMP)
3853                type = AMP_LINK;
3854        else
3855                type = ACL_LINK;
3856
3857        while (hdev->block_cnt > 0 &&
3858               (chan = hci_chan_sent(hdev, type, &quote))) {
3859                u32 priority = (skb_peek(&chan->data_q))->priority;
3860                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3861                        int blocks;
3862
3863                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3864                               skb->len, skb->priority);
3865
3866                        /* Stop if priority has changed */
3867                        if (skb->priority < priority)
3868                                break;
3869
3870                        skb = skb_dequeue(&chan->data_q);
3871
3872                        blocks = __get_blocks(hdev, skb);
3873                        if (blocks > hdev->block_cnt)
3874                                return;
3875
3876                        hci_conn_enter_active_mode(chan->conn,
3877                                                   bt_cb(skb)->force_active);
3878
3879                        hci_send_frame(hdev, skb);
3880                        hdev->acl_last_tx = jiffies;
3881
3882                        hdev->block_cnt -= blocks;
3883                        quote -= blocks;
3884
3885                        chan->sent += blocks;
3886                        chan->conn->sent += blocks;
3887                }
3888        }
3889
3890        if (cnt != hdev->block_cnt)
3891                hci_prio_recalculate(hdev, type);
3892}
3893
3894static void hci_sched_acl(struct hci_dev *hdev)
3895{
3896        BT_DBG("%s", hdev->name);
3897
3898        /* Nothing to schedule if a BR/EDR controller has no ACL links */
3899        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3900                return;
3901
3902        /* Nothing to schedule if an AMP controller has no AMP links */
3903        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3904                return;
3905
3906        switch (hdev->flow_ctl_mode) {
3907        case HCI_FLOW_CTL_MODE_PACKET_BASED:
3908                hci_sched_acl_pkt(hdev);
3909                break;
3910
3911        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3912                hci_sched_acl_blk(hdev);
3913                break;
3914        }
3915}
3916
3917/* Schedule SCO */
3918static void hci_sched_sco(struct hci_dev *hdev)
3919{
3920        struct hci_conn *conn;
3921        struct sk_buff *skb;
3922        int quote;
3923
3924        BT_DBG("%s", hdev->name);
3925
3926        if (!hci_conn_num(hdev, SCO_LINK))
3927                return;
3928
3929        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3930                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3931                        BT_DBG("skb %p len %d", skb, skb->len);
3932                        hci_send_frame(hdev, skb);
3933
3934                        conn->sent++;
3935                        if (conn->sent == ~0)
3936                                conn->sent = 0;
3937                }
3938        }
3939}
3940
3941static void hci_sched_esco(struct hci_dev *hdev)
3942{
3943        struct hci_conn *conn;
3944        struct sk_buff *skb;
3945        int quote;
3946
3947        BT_DBG("%s", hdev->name);
3948
3949        if (!hci_conn_num(hdev, ESCO_LINK))
3950                return;
3951
3952        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3953                                                     &quote))) {
3954                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3955                        BT_DBG("skb %p len %d", skb, skb->len);
3956                        hci_send_frame(hdev, skb);
3957
3958                        conn->sent++;
3959                        if (conn->sent == ~0)
3960                                conn->sent = 0;
3961                }
3962        }
3963}
3964
3965static void hci_sched_le(struct hci_dev *hdev)
3966{
3967        struct hci_chan *chan;
3968        struct sk_buff *skb;
3969        int quote, cnt, tmp;
3970
3971        BT_DBG("%s", hdev->name);
3972
3973        if (!hci_conn_num(hdev, LE_LINK))
3974                return;
3975
3976        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3977                /* LE tx timeout must be longer than maximum
3978                 * link supervision timeout (40.9 seconds) */
3979                if (!hdev->le_cnt && hdev->le_pkts &&
3980                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
3981                        hci_link_tx_to(hdev, LE_LINK);
3982        }
3983
3984        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3985        tmp = cnt;
3986        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3987                u32 priority = (skb_peek(&chan->data_q))->priority;
3988                while (quote-- && (skb = skb_peek(&chan->data_q))) {
3989                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3990                               skb->len, skb->priority);
3991
3992                        /* Stop if priority has changed */
3993                        if (skb->priority < priority)
3994                                break;
3995
3996                        skb = skb_dequeue(&chan->data_q);
3997
3998                        hci_send_frame(hdev, skb);
3999                        hdev->le_last_tx = jiffies;
4000
4001                        cnt--;
4002                        chan->sent++;
4003                        chan->conn->sent++;
4004                }
4005        }
4006
4007        if (hdev->le_pkts)
4008                hdev->le_cnt = cnt;
4009        else
4010                hdev->acl_cnt = cnt;
4011
4012        if (cnt != tmp)
4013                hci_prio_recalculate(hdev, LE_LINK);
4014}
4015
4016static void hci_tx_work(struct work_struct *work)
4017{
4018        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4019        struct sk_buff *skb;
4020
4021        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4022               hdev->sco_cnt, hdev->le_cnt);
4023
4024        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4025                /* Schedule queues and send frames to the HCI driver */
4026                hci_sched_acl(hdev);
4027                hci_sched_sco(hdev);
4028                hci_sched_esco(hdev);
4029                hci_sched_le(hdev);
4030        }
4031
4032        /* Send next queued raw (unknown type) packet */
4033        while ((skb = skb_dequeue(&hdev->raw_q)))
4034                hci_send_frame(hdev, skb);
4035}
4036
4037/* ----- HCI RX task (incoming data processing) ----- */
4038
4039/* ACL data packet */
4040static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4041{
4042        struct hci_acl_hdr *hdr = (void *) skb->data;
4043        struct hci_conn *conn;
4044        __u16 handle, flags;
4045
4046        skb_pull(skb, HCI_ACL_HDR_SIZE);
4047
4048        handle = __le16_to_cpu(hdr->handle);
4049        flags  = hci_flags(handle);
4050        handle = hci_handle(handle);
4051
4052        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4053               handle, flags);
4054
4055        hdev->stat.acl_rx++;
4056
4057        hci_dev_lock(hdev);
4058        conn = hci_conn_hash_lookup_handle(hdev, handle);
4059        hci_dev_unlock(hdev);
4060
4061        if (conn) {
4062                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4063
4064                /* Send to upper protocol */
4065                l2cap_recv_acldata(conn, skb, flags);
4066                return;
4067        }
4068
4069        bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4070                   handle);
4071
4072        kfree_skb(skb);
4073}
4074
4075/* SCO data packet */
4076static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4077{
4078        struct hci_sco_hdr *hdr = (void *) skb->data;
4079        struct hci_conn *conn;
4080        __u16 handle;
4081
4082        skb_pull(skb, HCI_SCO_HDR_SIZE);
4083
4084        handle = __le16_to_cpu(hdr->handle);
4085
4086        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4087
4088        hdev->stat.sco_rx++;
4089
4090        hci_dev_lock(hdev);
4091        conn = hci_conn_hash_lookup_handle(hdev, handle);
4092        hci_dev_unlock(hdev);
4093
4094        if (conn) {
4095                /* Send to upper protocol */
4096                sco_recv_scodata(conn, skb);
4097                return;
4098        }
4099
4100        bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4101                   handle);
4102
4103        kfree_skb(skb);
4104}
4105
4106static bool hci_req_is_complete(struct hci_dev *hdev)
4107{
4108        struct sk_buff *skb;
4109
4110        skb = skb_peek(&hdev->cmd_q);
4111        if (!skb)
4112                return true;
4113
4114        return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4115}
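
/* A request is a run of queued commands in which only the first one
 * carries HCI_REQ_START.  Seeing that flag (or an empty queue) at the
 * head of cmd_q therefore means the previous request has fully
 * drained.
 */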
4116
4117static void hci_resend_last(struct hci_dev *hdev)
4118{
4119        struct hci_command_hdr *sent;
4120        struct sk_buff *skb;
4121        u16 opcode;
4122
4123        if (!hdev->sent_cmd)
4124                return;
4125
4126        sent = (void *) hdev->sent_cmd->data;
4127        opcode = __le16_to_cpu(sent->opcode);
4128        if (opcode == HCI_OP_RESET)
4129                return;
4130
4131        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4132        if (!skb)
4133                return;
4134
4135        skb_queue_head(&hdev->cmd_q, skb);
4136        queue_work(hdev->workqueue, &hdev->cmd_work);
4137}
4138
4139void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4140                          hci_req_complete_t *req_complete,
4141                          hci_req_complete_skb_t *req_complete_skb)
4142{
4143        struct sk_buff *skb;
4144        unsigned long flags;
4145
4146        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4147
4148        /* If the completed command doesn't match the last one that was
4149         * sent we need to do special handling of it.
4150         */
4151        if (!hci_sent_cmd_data(hdev, opcode)) {
4152                /* Some CSR based controllers generate a spontaneous
4153                 * reset complete event during init and any pending
4154                 * command will never be completed. In such a case we
4155                 * need to resend whatever was the last sent
4156                 * command.
4157                 */
4158                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4159                        hci_resend_last(hdev);
4160
4161                return;
4162        }
4163
4164        /* If the command succeeded and there's still more commands in
4165         * this request the request is not yet complete.
4166         */
4167        if (!status && !hci_req_is_complete(hdev))
4168                return;
4169
4170        /* If this was the last command in a request the complete
4171         * callback would be found in hdev->sent_cmd instead of the
4172         * command queue (hdev->cmd_q).
4173         */
4174        if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4175                *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4176                return;
4177        }
4178
4179        if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4180                *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4181                return;
4182        }
4183
4184        /* Remove all pending commands belonging to this request */
4185        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4186        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4187                if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4188                        __skb_queue_head(&hdev->cmd_q, skb);
4189                        break;
4190                }
4191
4192                if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4193                        *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4194                else
4195                        *req_complete = bt_cb(skb)->hci.req_complete;
4196                kfree_skb(skb);
4197        }
4198        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4199}
4200
4201static void hci_rx_work(struct work_struct *work)
4202{
4203        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4204        struct sk_buff *skb;
4205
4206        BT_DBG("%s", hdev->name);
4207
4208        while ((skb = skb_dequeue(&hdev->rx_q))) {
4209                /* Send copy to monitor */
4210                hci_send_to_monitor(hdev, skb);
4211
4212                if (atomic_read(&hdev->promisc)) {
4213                        /* Send copy to the sockets */
4214                        hci_send_to_sock(hdev, skb);
4215                }
4216
4217                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4218                        kfree_skb(skb);
4219                        continue;
4220                }
4221
4222                if (test_bit(HCI_INIT, &hdev->flags)) {
4223                        /* Don't process data packets in this state. */
4224                        switch (hci_skb_pkt_type(skb)) {
4225                        case HCI_ACLDATA_PKT:
4226                        case HCI_SCODATA_PKT:
4227                                kfree_skb(skb);
4228                                continue;
4229                        }
4230                }
4231
4232                /* Process frame */
4233                switch (hci_skb_pkt_type(skb)) {
4234                case HCI_EVENT_PKT:
4235                        BT_DBG("%s Event packet", hdev->name);
4236                        hci_event_packet(hdev, skb);
4237                        break;
4238
4239                case HCI_ACLDATA_PKT:
4240                        BT_DBG("%s ACL data packet", hdev->name);
4241                        hci_acldata_packet(hdev, skb);
4242                        break;
4243
4244                case HCI_SCODATA_PKT:
4245                        BT_DBG("%s SCO data packet", hdev->name);
4246                        hci_scodata_packet(hdev, skb);
4247                        break;
4248
4249                default:
4250                        kfree_skb(skb);
4251                        break;
4252                }
4253        }
4254}
4255
4256static void hci_cmd_work(struct work_struct *work)
4257{
4258        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4259        struct sk_buff *skb;
4260
4261        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4262               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4263
4264        /* Send queued commands */
4265        if (atomic_read(&hdev->cmd_cnt)) {
4266                skb = skb_dequeue(&hdev->cmd_q);
4267                if (!skb)
4268                        return;
4269
4270                kfree_skb(hdev->sent_cmd);
4271
4272                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4273                if (hdev->sent_cmd) {
4274                        atomic_dec(&hdev->cmd_cnt);
4275                        hci_send_frame(hdev, skb);
4276                        if (test_bit(HCI_RESET, &hdev->flags))
4277                                cancel_delayed_work(&hdev->cmd_timer);
4278                        else
4279                                schedule_delayed_work(&hdev->cmd_timer,
4280                                                      HCI_CMD_TIMEOUT);
4281                } else {
4282                        skb_queue_head(&hdev->cmd_q, skb);
4283                        queue_work(hdev->workqueue, &hdev->cmd_work);
4284                }
4285        }
4286}
4287