linux/net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

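/* A minimal caller sketch for the asynchronous request API above
 * (illustrative only; assumes a valid hdev, and my_complete_cb is a
 * hypothetical hci_req_complete_t callback named for this example):
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * The callback runs once, after the last queued command completes.
 */
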
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

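/* One-shot synchronous command sketch (illustrative; error handling
 * abbreviated). On success the returned skb carries the command
 * response parameters and must be consumed with kfree_skb():
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	(parse skb->data here)
 *	kfree_skb(skb);
 */
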
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

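/* Sketch of a request builder passed to hci_req_sync() (illustrative;
 * scan_disable_req is a hypothetical helper named for this example):
 *
 *	static int scan_disable_req(struct hci_request *req,
 *				    unsigned long opt)
 *	{
 *		hci_req_add_le_scan_disable(req);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, scan_disable_req, 0, HCI_CMD_TIMEOUT,
 *			   &status);
 */
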
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

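/* The resulting HCI command packet layout, per the Bluetooth Core
 * specification (the opcode is little endian):
 *
 *	+---------------+------+--------------------+
 *	| opcode (le16) | plen | param (plen bytes) |
 *	+---------------+------+--------------------+
 */
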
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD; /* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we
 * start background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

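/* Example output of the loop above for a single 16-bit UUID 0x110e
 * (illustrative):
 *
 *	0x03 0x03 0x0e 0x11
 *	len, EIR_UUID16_ALL, then the UUID in little endian order
 */
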
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

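/* Example EIR produced above for a device named "BlueZ" with no TX
 * power, device ID or UUIDs configured (illustrative):
 *
 *	0x06 0x09 'B' 'l' 'u' 'e' 'Z'
 *	len (name_len + 1), EIR_NAME_COMPLETE, then the name bytes
 */
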
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the white list.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

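/* The value returned above feeds directly into the Filter_Policy field
 * of the LE Set Scan Parameters command:
 *
 *	0x00	accept all advertising packets (white list unused)
 *	0x01	accept only advertisers on the white list
 */
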
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses and
	 * thereby has LE privacy enabled, controllers with Extended
	 * Scanner Filter Policies support can additionally handle
	 * directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no white list)
	 * and 0x01 (white list enabled) use the new filter policies
	 * 0x02 (no white list) and 0x03 (white list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when given an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

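/* Advertising type selection above, summarized (illustrative):
 *
 *	connectable				-> LE_ADV_IND
 *	non-connectable, scan rsp data set	-> LE_ADV_SCAN_IND
 *	non-connectable, no scan rsp data	-> LE_ADV_NONCONN_IND
 */
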
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);
	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

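/* Worked example of the timeout logic above (illustrative): an
 * instance with duration 25 and remaining_time 60 advertises for 25
 * seconds, then another 25 (remaining_time now 10), and finally for
 * the leftover 10 seconds, after which it expires.
 */
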
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

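/* Own-address selection above, summarized (illustrative):
 *
 *	use_rpa			-> ADDR_LE_DEV_RANDOM, RPA (regenerated
 *				   when expired)
 *	require_privacy		-> ADDR_LE_DEV_RANDOM, fresh NRPA
 *	static address in use	-> ADDR_LE_DEV_RANDOM, static address
 *	otherwise		-> ADDR_LE_DEV_PUBLIC
 */
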
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

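/* Scan enable bits written above (Write Scan Enable), per the core
 * specification:
 *
 *	SCAN_DISABLED	0x00
 *	SCAN_INQUIRY	0x01	inquiry scan (discoverable)
 *	SCAN_PAGE	0x02	page scan (connectable)
 */
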
1501static int update_scan(struct hci_request *req, unsigned long opt)
1502{
1503        hci_dev_lock(req->hdev);
1504        __hci_req_update_scan(req);
1505        hci_dev_unlock(req->hdev);
1506        return 0;
1507}
1508
1509static void scan_update_work(struct work_struct *work)
1510{
1511        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1512
1513        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1514}
1515
1516static int connectable_update(struct hci_request *req, unsigned long opt)
1517{
1518        struct hci_dev *hdev = req->hdev;
1519
1520        hci_dev_lock(hdev);
1521
1522        __hci_req_update_scan(req);
1523
1524        /* If BR/EDR is not enabled and we disable advertising as a
1525         * by-product of disabling connectable, we need to update the
1526         * advertising flags.
1527         */
1528        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1529                __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1530
1531        /* Update the advertising parameters if necessary */
1532        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1533            !list_empty(&hdev->adv_instances))
1534                __hci_req_enable_advertising(req);
1535
1536        __hci_update_background_scan(req);
1537
1538        hci_dev_unlock(hdev);
1539
1540        return 0;
1541}
1542
1543static void connectable_update_work(struct work_struct *work)
1544{
1545        struct hci_dev *hdev = container_of(work, struct hci_dev,
1546                                            connectable_update);
1547        u8 status;
1548
1549        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1550        mgmt_set_connectable_complete(hdev, status);
1551}
1552
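    /* OR together the service class hints of all registered UUIDs to
     * form the service class byte of the Class of Device.
     */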
1553static u8 get_service_classes(struct hci_dev *hdev)
1554{
1555        struct bt_uuid *uuid;
1556        u8 val = 0;
1557
1558        list_for_each_entry(uuid, &hdev->uuids, list)
1559                val |= uuid->svc_hint;
1560
1561        return val;
1562}
1563
1564void __hci_req_update_class(struct hci_request *req)
1565{
1566        struct hci_dev *hdev = req->hdev;
1567        u8 cod[3];
1568
1569        BT_DBG("%s", hdev->name);
1570
1571        if (!hdev_is_powered(hdev))
1572                return;
1573
1574        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1575                return;
1576
1577        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1578                return;
1579
1580        cod[0] = hdev->minor_class;
1581        cod[1] = hdev->major_class;
1582        cod[2] = get_service_classes(hdev);
1583
1584        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1585                cod[1] |= 0x20;
1586
1587        if (memcmp(cod, hdev->dev_class, 3) == 0)
1588                return;
1589
1590        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1591}
1592
1593static void write_iac(struct hci_request *req)
1594{
1595        struct hci_dev *hdev = req->hdev;
1596        struct hci_cp_write_current_iac_lap cp;
1597
1598        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1599                return;
1600
1601        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1602                /* Limited discoverable mode */
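                    /* The IAC LAPs are stored least significant byte first:
                     * 0x00,0x8b,0x9e is the LIAC (0x9E8B00) and 0x33,0x8b,0x9e
                     * is the GIAC (0x9E8B33) from the Bluetooth assigned
                     * numbers.
                     */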
1603                cp.num_iac = min_t(u8, hdev->num_iac, 2);
1604                cp.iac_lap[0] = 0x00;   /* LIAC */
1605                cp.iac_lap[1] = 0x8b;
1606                cp.iac_lap[2] = 0x9e;
1607                cp.iac_lap[3] = 0x33;   /* GIAC */
1608                cp.iac_lap[4] = 0x8b;
1609                cp.iac_lap[5] = 0x9e;
1610        } else {
1611                /* General discoverable mode */
1612                cp.num_iac = 1;
1613                cp.iac_lap[0] = 0x33;   /* GIAC */
1614                cp.iac_lap[1] = 0x8b;
1615                cp.iac_lap[2] = 0x9e;
1616        }
1617
1618        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1619                    (cp.num_iac * 3) + 1, &cp);
1620}
1621
1622static int discoverable_update(struct hci_request *req, unsigned long opt)
1623{
1624        struct hci_dev *hdev = req->hdev;
1625
1626        hci_dev_lock(hdev);
1627
1628        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1629                write_iac(req);
1630                __hci_req_update_scan(req);
1631                __hci_req_update_class(req);
1632        }
1633
1634        /* Advertising instances don't use the global discoverable setting, so
1635         * only update AD if advertising was enabled using Set Advertising.
1636         */
1637        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1638                __hci_req_update_adv_data(req, 0x00);
1639
1640                /* Discoverable mode affects the local advertising
1641                 * address in limited privacy mode.
1642                 */
1643                if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1644                        __hci_req_enable_advertising(req);
1645        }
1646
1647        hci_dev_unlock(hdev);
1648
1649        return 0;
1650}
1651
1652static void discoverable_update_work(struct work_struct *work)
1653{
1654        struct hci_dev *hdev = container_of(work, struct hci_dev,
1655                                            discoverable_update);
1656        u8 status;
1657
1658        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1659        mgmt_set_discoverable_complete(hdev, status);
1660}
1661
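    /* Queue the HCI command needed to abort a connection based on its
     * current state: a disconnect for established links, a create
     * connection cancel for outgoing attempts and a connection request
     * rejection for incoming ones. The caller is responsible for
     * actually running the request.
     */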
1662void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1663                      u8 reason)
1664{
1665        switch (conn->state) {
1666        case BT_CONNECTED:
1667        case BT_CONFIG:
1668                if (conn->type == AMP_LINK) {
1669                        struct hci_cp_disconn_phy_link cp;
1670
1671                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1672                        cp.reason = reason;
1673                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1674                                    &cp);
1675                } else {
1676                        struct hci_cp_disconnect dc;
1677
1678                        dc.handle = cpu_to_le16(conn->handle);
1679                        dc.reason = reason;
1680                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1681                }
1682
1683                conn->state = BT_DISCONN;
1684
1685                break;
1686        case BT_CONNECT:
1687                if (conn->type == LE_LINK) {
1688                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1689                                break;
1690                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1691                                    0, NULL);
1692                } else if (conn->type == ACL_LINK) {
1693                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1694                                break;
1695                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1696                                    6, &conn->dst);
1697                }
1698                break;
1699        case BT_CONNECT2:
1700                if (conn->type == ACL_LINK) {
1701                        struct hci_cp_reject_conn_req rej;
1702
1703                        bacpy(&rej.bdaddr, &conn->dst);
1704                        rej.reason = reason;
1705
1706                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1707                                    sizeof(rej), &rej);
1708                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1709                        struct hci_cp_reject_sync_conn_req rej;
1710
1711                        bacpy(&rej.bdaddr, &conn->dst);
1712
1713                        /* SCO rejection has its own limited set of
1714                         * allowed error values (0x0D-0x0F), which isn't
1715                         * compatible with most values passed to this
1716                         * function. To be safe, hard-code one of the
1717                         * values that's suitable for SCO.
1718                         */
1719                        rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1720
1721                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1722                                    sizeof(rej), &rej);
1723                }
1724                break;
1725        default:
1726                conn->state = BT_CLOSED;
1727                break;
1728        }
1729}
1730
1731static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1732{
1733        if (status)
1734                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1735}
1736
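    /* Convenience wrapper around __hci_abort_conn() that builds and runs
     * a standalone HCI request to abort the given connection. A -ENODATA
     * result from hci_req_run() only means that no command was needed for
     * the current connection state, so it is not treated as an error.
     */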
1737int hci_abort_conn(struct hci_conn *conn, u8 reason)
1738{
1739        struct hci_request req;
1740        int err;
1741
1742        hci_req_init(&req, conn->hdev);
1743
1744        __hci_abort_conn(&req, conn, reason);
1745
1746        err = hci_req_run(&req, abort_conn_complete);
1747        if (err && err != -ENODATA) {
1748                BT_ERR("Failed to run HCI request: err %d", err);
1749                return err;
1750        }
1751
1752        return 0;
1753}
1754
1755static int update_bg_scan(struct hci_request *req, unsigned long opt)
1756{
1757        hci_dev_lock(req->hdev);
1758        __hci_update_background_scan(req);
1759        hci_dev_unlock(req->hdev);
1760        return 0;
1761}
1762
1763static void bg_scan_update(struct work_struct *work)
1764{
1765        struct hci_dev *hdev = container_of(work, struct hci_dev,
1766                                            bg_scan_update);
1767        struct hci_conn *conn;
1768        u8 status;
1769        int err;
1770
1771        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1772        if (!err)
1773                return;
1774
1775        hci_dev_lock(hdev);
1776
1777        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1778        if (conn)
1779                hci_le_conn_failed(conn, status);
1780
1781        hci_dev_unlock(hdev);
1782}
1783
1784static int le_scan_disable(struct hci_request *req, unsigned long opt)
1785{
1786        hci_req_add_le_scan_disable(req);
1787        return 0;
1788}
1789
1790static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1791{
1792        u8 length = opt;
1793        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1794        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1795        struct hci_cp_inquiry cp;
1796
1797        BT_DBG("%s", req->hdev->name);
1798
1799        hci_dev_lock(req->hdev);
1800        hci_inquiry_cache_flush(req->hdev);
1801        hci_dev_unlock(req->hdev);
1802
1803        memset(&cp, 0, sizeof(cp));
1804
1805        if (req->hdev->discovery.limited)
1806                memcpy(&cp.lap, liac, sizeof(cp.lap));
1807        else
1808                memcpy(&cp.lap, giac, sizeof(cp.lap));
1809
1810        cp.length = length;
1811
1812        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1813
1814        return 0;
1815}
1816
1817static void le_scan_disable_work(struct work_struct *work)
1818{
1819        struct hci_dev *hdev = container_of(work, struct hci_dev,
1820                                            le_scan_disable.work);
1821        u8 status;
1822
1823        BT_DBG("%s", hdev->name);
1824
1825        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1826                return;
1827
1828        cancel_delayed_work(&hdev->le_scan_restart);
1829
1830        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1831        if (status) {
1832                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1833                return;
1834        }
1835
1836        hdev->discovery.scan_start = 0;
1837
1838        /* If we were running an LE-only scan, change the discovery
1839         * state. If we were running both LE and BR/EDR inquiry
1840         * simultaneously and BR/EDR inquiry has already finished, stop
1841         * discovery; otherwise BR/EDR inquiry will stop discovery when
1842         * it finishes. If a remote device name is still being resolved,
1843         * do not change the discovery state.
1844         */
1845
1846        if (hdev->discovery.type == DISCOV_TYPE_LE)
1847                goto discov_stopped;
1848
1849        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1850                return;
1851
1852        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1853                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1854                    hdev->discovery.state != DISCOVERY_RESOLVING)
1855                        goto discov_stopped;
1856
1857                return;
1858        }
1859
1860        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1861                     HCI_CMD_TIMEOUT, &status);
1862        if (status) {
1863                BT_ERR("Inquiry failed: status 0x%02x", status);
1864                goto discov_stopped;
1865        }
1866
1867        return;
1868
1869discov_stopped:
1870        hci_dev_lock(hdev);
1871        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1872        hci_dev_unlock(hdev);
1873}
1874
1875static int le_scan_restart(struct hci_request *req, unsigned long opt)
1876{
1877        struct hci_dev *hdev = req->hdev;
1878        struct hci_cp_le_set_scan_enable cp;
1879
1880        /* If controller is not scanning we are done. */
1881        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1882                return 0;
1883
1884        hci_req_add_le_scan_disable(req);
1885
1886        memset(&cp, 0, sizeof(cp));
1887        cp.enable = LE_SCAN_ENABLE;
1888        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1889        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1890
1891        return 0;
1892}
1893
1894static void le_scan_restart_work(struct work_struct *work)
1895{
1896        struct hci_dev *hdev = container_of(work, struct hci_dev,
1897                                            le_scan_restart.work);
1898        unsigned long timeout, duration, scan_start, now;
1899        u8 status;
1900
1901        BT_DBG("%s", hdev->name);
1902
1903        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1904        if (status) {
1905                BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1906                return;
1907        }
1908
1909        hci_dev_lock(hdev);
1910
1911        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1912            !hdev->discovery.scan_start)
1913                goto unlock;
1914
1915        /* When the scan was started, hdev->le_scan_disable was queued to
1916         * run 'duration' after scan_start. That work has been canceled
1917         * during the scan restart, so queue it again with the remaining
1918         * timeout to make sure the scan does not run indefinitely.
1919         */
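            /* For example, assuming a scan_duration of 10240 jiffies: if
             * 4000 jiffies have elapsed since scan_start, the work below is
             * requeued with a timeout of 6240 jiffies. The 'else' branch of
             * the elapsed calculation only covers the case where the jiffies
             * counter wrapped around between scan_start and now.
             */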
1920        duration = hdev->discovery.scan_duration;
1921        scan_start = hdev->discovery.scan_start;
1922        now = jiffies;
1923        if (now - scan_start <= duration) {
1924                int elapsed;
1925
1926                if (now >= scan_start)
1927                        elapsed = now - scan_start;
1928                else
1929                        elapsed = ULONG_MAX - scan_start + now;
1930
1931                timeout = duration - elapsed;
1932        } else {
1933                timeout = 0;
1934        }
1935
1936        queue_delayed_work(hdev->req_workqueue,
1937                           &hdev->le_scan_disable, timeout);
1938
1939unlock:
1940        hci_dev_unlock(hdev);
1941}
1942
1943static void disable_advertising(struct hci_request *req)
1944{
1945        u8 enable = 0x00;
1946
1947        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1948}
1949
1950static int active_scan(struct hci_request *req, unsigned long opt)
1951{
1952        u16 interval = opt;
1953        struct hci_dev *hdev = req->hdev;
1954        struct hci_cp_le_set_scan_param param_cp;
1955        struct hci_cp_le_set_scan_enable enable_cp;
1956        u8 own_addr_type;
1957        int err;
1958
1959        BT_DBG("%s", hdev->name);
1960
1961        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1962                hci_dev_lock(hdev);
1963
1964                /* Don't let discovery abort an outgoing connection attempt
1965                 * that's using directed advertising.
1966                 */
1967                if (hci_lookup_le_connect(hdev)) {
1968                        hci_dev_unlock(hdev);
1969                        return -EBUSY;
1970                }
1971
1972                cancel_adv_timeout(hdev);
1973                hci_dev_unlock(hdev);
1974
1975                disable_advertising(req);
1976        }
1977
1978        /* If the controller is scanning, background scanning is
1979         * running, so we temporarily stop it in order to set the
1980         * discovery scan parameters.
1981         */
1982        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1983                hci_req_add_le_scan_disable(req);
1984
1985        /* All active scans will be done with either a resolvable private
1986         * address (when the privacy feature has been enabled) or a
1987         * non-resolvable private address.
1988         */
1989        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
1990                                        &own_addr_type);
1991        if (err < 0)
1992                own_addr_type = ADDR_LE_DEV_PUBLIC;
1993
1994        memset(&param_cp, 0, sizeof(param_cp));
1995        param_cp.type = LE_SCAN_ACTIVE;
1996        param_cp.interval = cpu_to_le16(interval);
1997        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1998        param_cp.own_address_type = own_addr_type;
1999
2000        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2001                    &param_cp);
2002
2003        memset(&enable_cp, 0, sizeof(enable_cp));
2004        enable_cp.enable = LE_SCAN_ENABLE;
2005        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2006
2007        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2008                    &enable_cp);
2009
2010        return 0;
2011}
2012
2013static int interleaved_discov(struct hci_request *req, unsigned long opt)
2014{
2015        int err;
2016
2017        BT_DBG("%s", req->hdev->name);
2018
2019        err = active_scan(req, opt);
2020        if (err)
2021                return err;
2022
2023        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2024}
2025
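    /* Run the synchronous request(s) matching hdev->discovery.type:
     * BR/EDR inquiry only, LE active scan only, or both (simultaneous or
     * interleaved, depending on controller quirks). For LE scanning this
     * also schedules the delayed work that disables the scan once the
     * discovery timeout expires.
     */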
2026static void start_discovery(struct hci_dev *hdev, u8 *status)
2027{
2028        unsigned long timeout;
2029
2030        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2031
2032        switch (hdev->discovery.type) {
2033        case DISCOV_TYPE_BREDR:
2034                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2035                        hci_req_sync(hdev, bredr_inquiry,
2036                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2037                                     status);
2038                return;
2039        case DISCOV_TYPE_INTERLEAVED:
2040                /* When running simultaneous discovery, the LE scanning time
2041                 * should occupy the whole discovery time since BR/EDR inquiry
2042                 * and LE scanning are scheduled by the controller.
2043                 *
2044                 * For interleaved discovery, in comparison, BR/EDR inquiry
2045                 * and LE scanning are done sequentially with separate
2046                 * timeouts.
2047                 */
2048                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2049                             &hdev->quirks)) {
2050                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2051                        /* During simultaneous discovery, we double the LE
2052                         * scan interval to leave the controller some time
2053                         * for BR/EDR inquiry.
2054                         */
2055                        hci_req_sync(hdev, interleaved_discov,
2056                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2057                                     status);
2058                        break;
2059                }
2060
2061                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2062                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2063                             HCI_CMD_TIMEOUT, status);
2064                break;
2065        case DISCOV_TYPE_LE:
2066                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2067                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2068                             HCI_CMD_TIMEOUT, status);
2069                break;
2070        default:
2071                *status = HCI_ERROR_UNSPECIFIED;
2072                return;
2073        }
2074
2075        if (*status)
2076                return;
2077
2078        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2079
2080        /* When service discovery is used and the controller has a
2081         * strict duplicate filter, it is important to remember the
2082         * start and duration of the scan. This is required for
2083         * restarting scanning during the discovery phase.
2084         */
2085        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2086                     hdev->discovery.result_filtering) {
2087                hdev->discovery.scan_start = jiffies;
2088                hdev->discovery.scan_duration = timeout;
2089        }
2090
2091        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2092                           timeout);
2093}
2094
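    /* Queue the HCI commands needed to stop an ongoing discovery: cancel
     * inquiry and/or disable LE scanning, and cancel a pending remote
     * name request if there is one. Returns true if at least one command
     * was queued on the request.
     */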
2095bool hci_req_stop_discovery(struct hci_request *req)
2096{
2097        struct hci_dev *hdev = req->hdev;
2098        struct discovery_state *d = &hdev->discovery;
2099        struct hci_cp_remote_name_req_cancel cp;
2100        struct inquiry_entry *e;
2101        bool ret = false;
2102
2103        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2104
2105        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2106                if (test_bit(HCI_INQUIRY, &hdev->flags))
2107                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2108
2109                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2110                        cancel_delayed_work(&hdev->le_scan_disable);
2111                        hci_req_add_le_scan_disable(req);
2112                }
2113
2114                ret = true;
2115        } else {
2116                /* Passive scanning */
2117                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2118                        hci_req_add_le_scan_disable(req);
2119                        ret = true;
2120                }
2121        }
2122
2123        /* No further actions needed for LE-only discovery */
2124        if (d->type == DISCOV_TYPE_LE)
2125                return ret;
2126
2127        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2128                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2129                                                     NAME_PENDING);
2130                if (!e)
2131                        return ret;
2132
2133                bacpy(&cp.bdaddr, &e->data.bdaddr);
2134                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2135                            &cp);
2136                ret = true;
2137        }
2138
2139        return ret;
2140}
2141
2142static int stop_discovery(struct hci_request *req, unsigned long opt)
2143{
2144        hci_dev_lock(req->hdev);
2145        hci_req_stop_discovery(req);
2146        hci_dev_unlock(req->hdev);
2147
2148        return 0;
2149}
2150
2151static void discov_update(struct work_struct *work)
2152{
2153        struct hci_dev *hdev = container_of(work, struct hci_dev,
2154                                            discov_update);
2155        u8 status = 0;
2156
2157        switch (hdev->discovery.state) {
2158        case DISCOVERY_STARTING:
2159                start_discovery(hdev, &status);
2160                mgmt_start_discovery_complete(hdev, status);
2161                if (status)
2162                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2163                else
2164                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2165                break;
2166        case DISCOVERY_STOPPING:
2167                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2168                mgmt_stop_discovery_complete(hdev, status);
2169                if (!status)
2170                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2171                break;
2172        case DISCOVERY_STOPPED:
2173        default:
2174                return;
2175        }
2176}
2177
2178static void discov_off(struct work_struct *work)
2179{
2180        struct hci_dev *hdev = container_of(work, struct hci_dev,
2181                                            discov_off.work);
2182
2183        BT_DBG("%s", hdev->name);
2184
2185        hci_dev_lock(hdev);
2186
2187        /* When the discoverable timeout triggers, just make sure that
2188         * the limited discoverable flag is cleared. Even in the case
2189         * of a timeout triggered from general discoverable, it is
2190         * safe to unconditionally clear the flag.
2191         */
2192        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2193        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2194        hdev->discov_timeout = 0;
2195
2196        hci_dev_unlock(hdev);
2197
2198        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2199        mgmt_new_settings(hdev);
2200}
2201
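    /* Bring the controller configuration in sync with the current stack
     * settings right after power-on: SSP and Secure Connections modes,
     * LE host support, advertising state, authentication and the
     * BR/EDR specific settings (scan mode, class, name and EIR data).
     */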
2202static int powered_update_hci(struct hci_request *req, unsigned long opt)
2203{
2204        struct hci_dev *hdev = req->hdev;
2205        u8 link_sec;
2206
2207        hci_dev_lock(hdev);
2208
2209        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2210            !lmp_host_ssp_capable(hdev)) {
2211                u8 mode = 0x01;
2212
2213                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2214
2215                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2216                        u8 support = 0x01;
2217
2218                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2219                                    sizeof(support), &support);
2220                }
2221        }
2222
2223        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2224            lmp_bredr_capable(hdev)) {
2225                struct hci_cp_write_le_host_supported cp;
2226
2227                cp.le = 0x01;
2228                cp.simul = 0x00;
2229
2230                /* Check first if we already have the right
2231                 * host state (host features set)
2232                 */
2233                if (cp.le != lmp_host_le_capable(hdev) ||
2234                    cp.simul != lmp_host_le_br_capable(hdev))
2235                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2236                                    sizeof(cp), &cp);
2237        }
2238
2239        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2240                /* Make sure the controller has a good default for
2241                 * advertising data. This also applies to the case
2242                 * where BR/EDR was toggled during the AUTO_OFF phase.
2243                 */
2244                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2245                    list_empty(&hdev->adv_instances)) {
2246                        __hci_req_update_adv_data(req, 0x00);
2247                        __hci_req_update_scan_rsp_data(req, 0x00);
2248
2249                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2250                                __hci_req_enable_advertising(req);
2251                } else if (!list_empty(&hdev->adv_instances)) {
2252                        struct adv_info *adv_instance;
2253
2254                        adv_instance = list_first_entry(&hdev->adv_instances,
2255                                                        struct adv_info, list);
2256                        __hci_req_schedule_adv_instance(req,
2257                                                        adv_instance->instance,
2258                                                        true);
2259                }
2260        }
2261
2262        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2263        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2264                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2265                            sizeof(link_sec), &link_sec);
2266
2267        if (lmp_bredr_capable(hdev)) {
2268                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2269                        __hci_req_write_fast_connectable(req, true);
2270                else
2271                        __hci_req_write_fast_connectable(req, false);
2272                __hci_req_update_scan(req);
2273                __hci_req_update_class(req);
2274                __hci_req_update_name(req);
2275                __hci_req_update_eir(req);
2276        }
2277
2278        hci_dev_unlock(hdev);
2279        return 0;
2280}
2281
2282int __hci_req_hci_power_on(struct hci_dev *hdev)
2283{
2284        /* Register the available SMP channels (BR/EDR and LE) only when
2285         * successfully powering on the controller. This late
2286         * registration is required so that LE SMP can clearly decide
2287         * whether the public or the static address is used.
2288         */
2289        smp_register(hdev);
2290
2291        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2292                              NULL);
2293}
2294
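    /* Initialize the regular and delayed work items that drive the
     * request helpers in this file; hci_request_cancel_all() below
     * cancels any of them that are still pending when the controller
     * is being torn down.
     */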
2295void hci_request_setup(struct hci_dev *hdev)
2296{
2297        INIT_WORK(&hdev->discov_update, discov_update);
2298        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2299        INIT_WORK(&hdev->scan_update, scan_update_work);
2300        INIT_WORK(&hdev->connectable_update, connectable_update_work);
2301        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2302        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2303        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2304        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2305        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2306}
2307
2308void hci_request_cancel_all(struct hci_dev *hdev)
2309{
2310        hci_req_sync_cancel(hdev, ENODEV);
2311
2312        cancel_work_sync(&hdev->discov_update);
2313        cancel_work_sync(&hdev->bg_scan_update);
2314        cancel_work_sync(&hdev->scan_update);
2315        cancel_work_sync(&hdev->connectable_update);
2316        cancel_work_sync(&hdev->discoverable_update);
2317        cancel_delayed_work_sync(&hdev->discov_off);
2318        cancel_delayed_work_sync(&hdev->le_scan_disable);
2319        cancel_delayed_work_sync(&hdev->le_scan_restart);
2320
2321        if (hdev->adv_instance_timeout) {
2322                cancel_delayed_work_sync(&hdev->adv_instance_expire);
2323                hdev->adv_instance_timeout = 0;
2324        }
2325}
2326