linux/net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

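/* Status values tracked in hdev->req_status for synchronous requests */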
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

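/* Attach the completion callback to the last command of the request,
 * splice all queued commands onto the device command queue and kick
 * the command processing work. Returns -ENODATA for an empty request.
 */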
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

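/* Completion callback for synchronous requests: record the result, keep
 * a reference to the response skb and wake up the thread sleeping in
 * __hci_req_sync() or __hci_cmd_sync_ev().
 */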
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

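/* Send a single HCI command and wait for it to complete. When @event is
 * non-zero the caller waits for that specific event, otherwise for the
 * Command Complete/Status event. Returns the resulting event skb or an
 * ERR_PTR() on failure.
 */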
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

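/* Allocate an skb and fill in the HCI command header and parameters */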
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

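/* Queue Write Page Scan Activity/Type commands to switch between the
 * default page scan parameters and the faster interlaced scan used for
 * "fast connectable" mode. Commands are only emitted when the values
 * actually change.
 */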
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

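/* Queue a Write Local Name command with the current device name */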
void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

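/* The following helpers build the EIR "Service Class UUID" fields. Each
 * writes at most @len bytes starting at @data, returns a pointer just
 * past the bytes written, and downgrades the field type from the
 * "complete" to the "incomplete" (EIR_UUID*_SOME) variant when it runs
 * out of space.
 */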
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

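/* Build the full extended inquiry response: local name, TX power,
 * device ID and the service class UUID lists.
 */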
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

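/* Synchronize the controller white list with pend_le_conns and
 * pend_le_reports. Returns the scan filter policy to use: 0x01 when the
 * white list can be used, 0x00 when scanning must accept all
 * advertisements (an RPA is in use or the white list is too small).
 */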
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return the filter policy value to not use
         * the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

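/* Program the advertising parameters derived from the current instance
 * flags (connectable vs. non-connectable, scannable) and enable
 * advertising. Any running advertising is disabled first so that the
 * random address can be safely updated.
 */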
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

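/* Append the device name as an AD field: the complete name when it
 * fits, otherwise the configured short name, otherwise the complete
 * name truncated to HCI_MAX_SHORT_NAME_LENGTH.
 */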
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

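/* Queue an LE Set Scan Response Data command when the scan response
 * payload for @instance differs from what was last programmed.
 */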
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

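/* Build the advertising data for @instance: the "Flags" field (unless
 * empty), the instance data set by userspace and, when requested and
 * valid, the TX power. Returns the number of bytes written.
 */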
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

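/* Select the own address type for a subsequent advertising or scanning
 * operation and queue an LE Set Random Address command when needed:
 * a resolvable private address when @use_rpa, a non-resolvable private
 * address when @require_privacy, otherwise the static or public address.
 */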
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

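/* Check whether any device on the BR/EDR whitelist lacks a fully
 * established connection; if so, page scanning needs to stay enabled so
 * that such devices can reconnect.
 */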
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

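/* Queue a Write Scan Enable command reflecting the connectable and
 * discoverable settings, but only when the value actually changes.
 */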
1507void __hci_req_update_scan(struct hci_request *req)
1508{
1509        struct hci_dev *hdev = req->hdev;
1510        u8 scan;
1511
1512        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1513                return;
1514
1515        if (!hdev_is_powered(hdev))
1516                return;
1517
1518        if (mgmt_powering_down(hdev))
1519                return;
1520
1521        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1522            disconnected_whitelist_entries(hdev))
1523                scan = SCAN_PAGE;
1524        else
1525                scan = SCAN_DISABLED;
1526
1527        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1528                scan |= SCAN_INQUIRY;
1529
1530        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1531            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1532                return;
1533
1534        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1535}
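
/* For reference: the scan byte written above is the Write Scan Enable
 * bit mask from the HCI specification:
 *
 *        SCAN_DISABLED   0x00    no scans
 *        SCAN_INQUIRY    0x01    inquiry scan (discoverable)
 *        SCAN_PAGE       0x02    page scan (connectable)
 *
 * so a device that is both connectable and discoverable writes 0x03.
 */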
1536
1537static int update_scan(struct hci_request *req, unsigned long opt)
1538{
1539        hci_dev_lock(req->hdev);
1540        __hci_req_update_scan(req);
1541        hci_dev_unlock(req->hdev);
1542        return 0;
1543}
1544
1545static void scan_update_work(struct work_struct *work)
1546{
1547        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1548
1549        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1550}
1551
1552static int connectable_update(struct hci_request *req, unsigned long opt)
1553{
1554        struct hci_dev *hdev = req->hdev;
1555
1556        hci_dev_lock(hdev);
1557
1558        __hci_req_update_scan(req);
1559
1560        /* If BR/EDR is not enabled and we disable advertising as a
1561         * by-product of disabling connectable, we need to update the
1562         * advertising flags.
1563         */
1564        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1565                __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1566
1567        /* Update the advertising parameters if necessary */
1568        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1569            !list_empty(&hdev->adv_instances))
1570                __hci_req_enable_advertising(req);
1571
1572        __hci_update_background_scan(req);
1573
1574        hci_dev_unlock(hdev);
1575
1576        return 0;
1577}
1578
1579static void connectable_update_work(struct work_struct *work)
1580{
1581        struct hci_dev *hdev = container_of(work, struct hci_dev,
1582                                            connectable_update);
1583        u8 status;
1584
1585        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1586        mgmt_set_connectable_complete(hdev, status);
1587}
1588
1589static u8 get_service_classes(struct hci_dev *hdev)
1590{
1591        struct bt_uuid *uuid;
1592        u8 val = 0;
1593
1594        list_for_each_entry(uuid, &hdev->uuids, list)
1595                val |= uuid->svc_hint;
1596
1597        return val;
1598}
1599
1600void __hci_req_update_class(struct hci_request *req)
1601{
1602        struct hci_dev *hdev = req->hdev;
1603        u8 cod[3];
1604
1605        BT_DBG("%s", hdev->name);
1606
1607        if (!hdev_is_powered(hdev))
1608                return;
1609
1610        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1611                return;
1612
1613        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1614                return;
1615
1616        cod[0] = hdev->minor_class;
1617        cod[1] = hdev->major_class;
1618        cod[2] = get_service_classes(hdev);
1619
1620        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1621                cod[1] |= 0x20;
1622
1623        if (memcmp(cod, hdev->dev_class, 3) == 0)
1624                return;
1625
1626        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1627}
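
/* For reference: the Class of Device is a 3-byte little-endian field,
 * so cod[0] carries the minor class, cod[1] the major class together
 * with the low service-class bits, and cod[2] the remaining service
 * classes. The 0x20 OR above sets CoD bit 13, assigned as "Limited
 * Discoverable Mode".
 */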
1628
1629static void write_iac(struct hci_request *req)
1630{
1631        struct hci_dev *hdev = req->hdev;
1632        struct hci_cp_write_current_iac_lap cp;
1633
1634        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1635                return;
1636
1637        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1638                /* Limited discoverable mode */
1639                cp.num_iac = min_t(u8, hdev->num_iac, 2);
1640                cp.iac_lap[0] = 0x00;   /* LIAC */
1641                cp.iac_lap[1] = 0x8b;
1642                cp.iac_lap[2] = 0x9e;
1643                cp.iac_lap[3] = 0x33;   /* GIAC */
1644                cp.iac_lap[4] = 0x8b;
1645                cp.iac_lap[5] = 0x9e;
1646        } else {
1647                /* General discoverable mode */
1648                cp.num_iac = 1;
1649                cp.iac_lap[0] = 0x33;   /* GIAC */
1650                cp.iac_lap[1] = 0x8b;
1651                cp.iac_lap[2] = 0x9e;
1652        }
1653
1654        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1655                    (cp.num_iac * 3) + 1, &cp);
1656}
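
/* The byte triplets above are the little-endian encodings of the
 * assigned inquiry access code LAPs: 0x9e8b00 for the LIAC (limited
 * inquiry) and 0x9e8b33 for the GIAC (general inquiry).
 */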
1657
1658static int discoverable_update(struct hci_request *req, unsigned long opt)
1659{
1660        struct hci_dev *hdev = req->hdev;
1661
1662        hci_dev_lock(hdev);
1663
1664        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1665                write_iac(req);
1666                __hci_req_update_scan(req);
1667                __hci_req_update_class(req);
1668        }
1669
1670        /* Advertising instances don't use the global discoverable setting, so
1671         * only update AD if advertising was enabled using Set Advertising.
1672         */
1673        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1674                __hci_req_update_adv_data(req, 0x00);
1675
1676                /* Discoverable mode affects the local advertising
1677                 * address in limited privacy mode.
1678                 */
1679                if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1680                        __hci_req_enable_advertising(req);
1681        }
1682
1683        hci_dev_unlock(hdev);
1684
1685        return 0;
1686}
1687
1688static void discoverable_update_work(struct work_struct *work)
1689{
1690        struct hci_dev *hdev = container_of(work, struct hci_dev,
1691                                            discoverable_update);
1692        u8 status;
1693
1694        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1695        mgmt_set_discoverable_complete(hdev, status);
1696}
1697
1698void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1699                      u8 reason)
1700{
1701        switch (conn->state) {
1702        case BT_CONNECTED:
1703        case BT_CONFIG:
1704                if (conn->type == AMP_LINK) {
1705                        struct hci_cp_disconn_phy_link cp;
1706
1707                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1708                        cp.reason = reason;
1709                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1710                                    &cp);
1711                } else {
1712                        struct hci_cp_disconnect dc;
1713
1714                        dc.handle = cpu_to_le16(conn->handle);
1715                        dc.reason = reason;
1716                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1717                }
1718
1719                conn->state = BT_DISCONN;
1720
1721                break;
1722        case BT_CONNECT:
1723                if (conn->type == LE_LINK) {
1724                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1725                                break;
1726                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1727                                    0, NULL);
1728                } else if (conn->type == ACL_LINK) {
1729                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1730                                break;
1731                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1732                                    6, &conn->dst);
1733                }
1734                break;
1735        case BT_CONNECT2:
1736                if (conn->type == ACL_LINK) {
1737                        struct hci_cp_reject_conn_req rej;
1738
1739                        bacpy(&rej.bdaddr, &conn->dst);
1740                        rej.reason = reason;
1741
1742                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1743                                    sizeof(rej), &rej);
1744                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1745                        struct hci_cp_reject_sync_conn_req rej;
1746
1747                        bacpy(&rej.bdaddr, &conn->dst);
1748
1749                        /* SCO rejection has its own limited set of
1750                         * allowed error values (0x0D-0x0F), which isn't
1751                         * compatible with most values passed to this
1752                         * function. To be safe, hard-code one of the
1753                         * values that's suitable for SCO.
1754                         */
1755                        rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1756
1757                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1758                                    sizeof(rej), &rej);
1759                }
1760                break;
1761        default:
1762                conn->state = BT_CLOSED;
1763                break;
1764        }
1765}
1766
1767static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1768{
1769        if (status)
1770                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1771}
1772
1773int hci_abort_conn(struct hci_conn *conn, u8 reason)
1774{
1775        struct hci_request req;
1776        int err;
1777
1778        hci_req_init(&req, conn->hdev);
1779
1780        __hci_abort_conn(&req, conn, reason);
1781
1782        err = hci_req_run(&req, abort_conn_complete);
1783        if (err && err != -ENODATA) {
1784                BT_ERR("Failed to run HCI request: err %d", err);
1785                return err;
1786        }
1787
1788        return 0;
1789}
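
/* Illustrative call (a sketch of a typical caller, not a definitive
 * call site): an upper layer tearing down a link immediately could do
 *
 *        hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * Note that -ENODATA from hci_req_run() is treated as success above,
 * since an empty request only means there was nothing to send for the
 * connection's current state.
 */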
1790
1791static int update_bg_scan(struct hci_request *req, unsigned long opt)
1792{
1793        hci_dev_lock(req->hdev);
1794        __hci_update_background_scan(req);
1795        hci_dev_unlock(req->hdev);
1796        return 0;
1797}
1798
1799static void bg_scan_update(struct work_struct *work)
1800{
1801        struct hci_dev *hdev = container_of(work, struct hci_dev,
1802                                            bg_scan_update);
1803        struct hci_conn *conn;
1804        u8 status;
1805        int err;
1806
1807        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1808        if (!err)
1809                return;
1810
1811        hci_dev_lock(hdev);
1812
1813        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1814        if (conn)
1815                hci_le_conn_failed(conn, status);
1816
1817        hci_dev_unlock(hdev);
1818}
1819
1820static int le_scan_disable(struct hci_request *req, unsigned long opt)
1821{
1822        hci_req_add_le_scan_disable(req);
1823        return 0;
1824}
1825
1826static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1827{
1828        u8 length = opt;
1829        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1830        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1831        struct hci_cp_inquiry cp;
1832
1833        BT_DBG("%s", req->hdev->name);
1834
1835        hci_dev_lock(req->hdev);
1836        hci_inquiry_cache_flush(req->hdev);
1837        hci_dev_unlock(req->hdev);
1838
1839        memset(&cp, 0, sizeof(cp));
1840
1841        if (req->hdev->discovery.limited)
1842                memcpy(&cp.lap, liac, sizeof(cp.lap));
1843        else
1844                memcpy(&cp.lap, giac, sizeof(cp.lap));
1845
1846        cp.length = length;
1847
1848        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1849
1850        return 0;
1851}
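
/* Note: cp.length above is expressed in units of 1.28 seconds, as the
 * HCI Inquiry command defines, so for example a length of 0x08 keeps
 * the inquiry running for roughly 10.24 seconds.
 */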
1852
1853static void le_scan_disable_work(struct work_struct *work)
1854{
1855        struct hci_dev *hdev = container_of(work, struct hci_dev,
1856                                            le_scan_disable.work);
1857        u8 status;
1858
1859        BT_DBG("%s", hdev->name);
1860
1861        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1862                return;
1863
1864        cancel_delayed_work(&hdev->le_scan_restart);
1865
1866        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1867        if (status) {
1868                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1869                return;
1870        }
1871
1872        hdev->discovery.scan_start = 0;
1873
1874        /* If we were running an LE-only scan, change the discovery
1875         * state. If we were running both LE scan and BR/EDR inquiry
1876         * simultaneously and the BR/EDR inquiry has already finished,
1877         * stop discovery; otherwise the BR/EDR inquiry will stop
1878         * discovery when it finishes. If we are still resolving a
1879         * remote device name, do not change the discovery state.
1880         */
1881
1882        if (hdev->discovery.type == DISCOV_TYPE_LE)
1883                goto discov_stopped;
1884
1885        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1886                return;
1887
1888        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1889                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1890                    hdev->discovery.state != DISCOVERY_RESOLVING)
1891                        goto discov_stopped;
1892
1893                return;
1894        }
1895
1896        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1897                     HCI_CMD_TIMEOUT, &status);
1898        if (status) {
1899                BT_ERR("Inquiry failed: status 0x%02x", status);
1900                goto discov_stopped;
1901        }
1902
1903        return;
1904
1905discov_stopped:
1906        hci_dev_lock(hdev);
1907        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1908        hci_dev_unlock(hdev);
1909}
1910
1911static int le_scan_restart(struct hci_request *req, unsigned long opt)
1912{
1913        struct hci_dev *hdev = req->hdev;
1914        struct hci_cp_le_set_scan_enable cp;
1915
1916        /* If the controller is not scanning, we are done. */
1917        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1918                return 0;
1919
1920        hci_req_add_le_scan_disable(req);
1921
1922        memset(&cp, 0, sizeof(cp));
1923        cp.enable = LE_SCAN_ENABLE;
1924        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1925        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1926
1927        return 0;
1928}
1929
1930static void le_scan_restart_work(struct work_struct *work)
1931{
1932        struct hci_dev *hdev = container_of(work, struct hci_dev,
1933                                            le_scan_restart.work);
1934        unsigned long timeout, duration, scan_start, now;
1935        u8 status;
1936
1937        BT_DBG("%s", hdev->name);
1938
1939        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1940        if (status) {
1941                BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1942                return;
1943        }
1944
1945        hci_dev_lock(hdev);
1946
1947        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1948            !hdev->discovery.scan_start)
1949                goto unlock;
1950
1951        /* When the scan was started, hdev->le_scan_disable was queued
1952         * to fire 'duration' jiffies after scan_start. The scan restart
1953         * canceled that work, so queue it again with the remaining
1954         * timeout to make sure the scan does not run indefinitely.
1955         */
1956        duration = hdev->discovery.scan_duration;
1957        scan_start = hdev->discovery.scan_start;
1958        now = jiffies;
1959        if (now - scan_start <= duration) {
1960                int elapsed;
1961
1962                if (now >= scan_start)
1963                        elapsed = now - scan_start;
1964                else
1965                        elapsed = ULONG_MAX - scan_start + now;
1966
1967                timeout = duration - elapsed;
1968        } else {
1969                timeout = 0;
1970        }
1971
1972        queue_delayed_work(hdev->req_workqueue,
1973                           &hdev->le_scan_disable, timeout);
1974
1975unlock:
1976        hci_dev_unlock(hdev);
1977}
1978
1979static void disable_advertising(struct hci_request *req)
1980{
1981        u8 enable = 0x00;
1982
1983        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1984}
1985
1986static int active_scan(struct hci_request *req, unsigned long opt)
1987{
1988        u16 interval = opt;
1989        struct hci_dev *hdev = req->hdev;
1990        struct hci_cp_le_set_scan_param param_cp;
1991        struct hci_cp_le_set_scan_enable enable_cp;
1992        u8 own_addr_type;
1993        int err;
1994
1995        BT_DBG("%s", hdev->name);
1996
1997        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1998                hci_dev_lock(hdev);
1999
2000                /* Don't let discovery abort an outgoing connection attempt
2001                 * that's using directed advertising.
2002                 */
2003                if (hci_lookup_le_connect(hdev)) {
2004                        hci_dev_unlock(hdev);
2005                        return -EBUSY;
2006                }
2007
2008                cancel_adv_timeout(hdev);
2009                hci_dev_unlock(hdev);
2010
2011                disable_advertising(req);
2012        }
2013
2014        /* If the controller is already scanning, the background scan
2015         * is running. Temporarily stop it so that the discovery scan
2016         * parameters can be set.
2017         */
2018        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2019                hci_req_add_le_scan_disable(req);
2020
2021        /* All active scans will be done with either a resolvable private
2022         * address (when privacy feature has been enabled) or non-resolvable
2023         * private address.
2024         */
2025        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2026                                        &own_addr_type);
2027        if (err < 0)
2028                own_addr_type = ADDR_LE_DEV_PUBLIC;
2029
2030        memset(&param_cp, 0, sizeof(param_cp));
2031        param_cp.type = LE_SCAN_ACTIVE;
2032        param_cp.interval = cpu_to_le16(interval);
2033        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2034        param_cp.own_address_type = own_addr_type;
2035
2036        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2037                    &param_cp);
2038
2039        memset(&enable_cp, 0, sizeof(enable_cp));
2040        enable_cp.enable = LE_SCAN_ENABLE;
2041        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2042
2043        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2044                    &enable_cp);
2045
2046        return 0;
2047}
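
/* For reference: the scan interval and window are in units of 0.625 ms,
 * as LE Set Scan Parameters defines, and an interval equal to the
 * window means scanning continuously. LE_SCAN_ACTIVE makes the
 * controller send scan requests so that scan response data (e.g.
 * device names) is collected during discovery.
 */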
2048
2049static int interleaved_discov(struct hci_request *req, unsigned long opt)
2050{
2051        int err;
2052
2053        BT_DBG("%s", req->hdev->name);
2054
2055        err = active_scan(req, opt);
2056        if (err)
2057                return err;
2058
2059        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2060}
2061
2062static void start_discovery(struct hci_dev *hdev, u8 *status)
2063{
2064        unsigned long timeout;
2065
2066        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2067
2068        switch (hdev->discovery.type) {
2069        case DISCOV_TYPE_BREDR:
2070                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2071                        hci_req_sync(hdev, bredr_inquiry,
2072                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2073                                     status);
2074                return;
2075        case DISCOV_TYPE_INTERLEAVED:
2076                /* When running simultaneous discovery, the LE scanning time
2077                 * should occupy the whole discovery time since BR/EDR inquiry
2078                 * and LE scanning are scheduled by the controller.
2079                 *
2080                 * With interleaved discovery, in contrast, BR/EDR inquiry
2081                 * and LE scanning are done sequentially with separate
2082                 * timeouts.
2083                 */
2084                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2085                             &hdev->quirks)) {
2086                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2087                        /* During simultaneous discovery, we double LE scan
2088                         * interval. We must leave some time for the controller
2089                         * to do BR/EDR inquiry.
2090                         */
2091                        hci_req_sync(hdev, interleaved_discov,
2092                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2093                                     status);
2094                        break;
2095                }
2096
2097                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2098                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2099                             HCI_CMD_TIMEOUT, status);
2100                break;
2101        case DISCOV_TYPE_LE:
2102                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2103                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2104                             HCI_CMD_TIMEOUT, status);
2105                break;
2106        default:
2107                *status = HCI_ERROR_UNSPECIFIED;
2108                return;
2109        }
2110
2111        if (*status)
2112                return;
2113
2114        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2115
2116        /* When service discovery is used and the controller has a
2117         * strict duplicate filter, it is important to remember the
2118         * start and duration of the scan. This is required for
2119         * restarting scanning during the discovery phase.
2120         */
2121        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2122            hdev->discovery.result_filtering) {
2123                hdev->discovery.scan_start = jiffies;
2124                hdev->discovery.scan_duration = timeout;
2125        }
2126
2127        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2128                           timeout);
2129}
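
/* Resulting schedule: the timeout chosen above arms the le_scan_disable
 * delayed work, which in turn (see le_scan_disable_work() above) either
 * stops discovery or kicks off the BR/EDR inquiry half of an
 * interleaved discovery.
 */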
2130
2131bool hci_req_stop_discovery(struct hci_request *req)
2132{
2133        struct hci_dev *hdev = req->hdev;
2134        struct discovery_state *d = &hdev->discovery;
2135        struct hci_cp_remote_name_req_cancel cp;
2136        struct inquiry_entry *e;
2137        bool ret = false;
2138
2139        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2140
2141        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2142                if (test_bit(HCI_INQUIRY, &hdev->flags))
2143                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2144
2145                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2146                        cancel_delayed_work(&hdev->le_scan_disable);
2147                        hci_req_add_le_scan_disable(req);
2148                }
2149
2150                ret = true;
2151        } else {
2152                /* Passive scanning */
2153                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2154                        hci_req_add_le_scan_disable(req);
2155                        ret = true;
2156                }
2157        }
2158
2159        /* No further actions needed for LE-only discovery */
2160        if (d->type == DISCOV_TYPE_LE)
2161                return ret;
2162
2163        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2164                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2165                                                     NAME_PENDING);
2166                if (!e)
2167                        return ret;
2168
2169                bacpy(&cp.bdaddr, &e->data.bdaddr);
2170                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2171                            &cp);
2172                ret = true;
2173        }
2174
2175        return ret;
2176}
2177
2178static int stop_discovery(struct hci_request *req, unsigned long opt)
2179{
2180        hci_dev_lock(req->hdev);
2181        hci_req_stop_discovery(req);
2182        hci_dev_unlock(req->hdev);
2183
2184        return 0;
2185}
2186
2187static void discov_update(struct work_struct *work)
2188{
2189        struct hci_dev *hdev = container_of(work, struct hci_dev,
2190                                            discov_update);
2191        u8 status = 0;
2192
2193        switch (hdev->discovery.state) {
2194        case DISCOVERY_STARTING:
2195                start_discovery(hdev, &status);
2196                mgmt_start_discovery_complete(hdev, status);
2197                if (status)
2198                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2199                else
2200                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2201                break;
2202        case DISCOVERY_STOPPING:
2203                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2204                mgmt_stop_discovery_complete(hdev, status);
2205                if (!status)
2206                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2207                break;
2208        case DISCOVERY_STOPPED:
2209        default:
2210                return;
2211        }
2212}
2213
2214static void discov_off(struct work_struct *work)
2215{
2216        struct hci_dev *hdev = container_of(work, struct hci_dev,
2217                                            discov_off.work);
2218
2219        BT_DBG("%s", hdev->name);
2220
2221        hci_dev_lock(hdev);
2222
2223        /* When the discoverable timeout triggers, just make sure the
2224         * limited discoverable flag is cleared. Even when the timeout
2225         * was triggered from general discoverable mode, it is safe to
2226         * clear the flag unconditionally.
2227         */
2228        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2229        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2230        hdev->discov_timeout = 0;
2231
2232        hci_dev_unlock(hdev);
2233
2234        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2235        mgmt_new_settings(hdev);
2236}
2237
2238static int powered_update_hci(struct hci_request *req, unsigned long opt)
2239{
2240        struct hci_dev *hdev = req->hdev;
2241        u8 link_sec;
2242
2243        hci_dev_lock(hdev);
2244
2245        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2246            !lmp_host_ssp_capable(hdev)) {
2247                u8 mode = 0x01;
2248
2249                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2250
2251                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2252                        u8 support = 0x01;
2253
2254                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2255                                    sizeof(support), &support);
2256                }
2257        }
2258
2259        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2260            lmp_bredr_capable(hdev)) {
2261                struct hci_cp_write_le_host_supported cp;
2262
2263                cp.le = 0x01;
2264                cp.simul = 0x00;
2265
2266                /* Check first if we already have the right
2267                 * host state (host features set)
2268                 */
2269                if (cp.le != lmp_host_le_capable(hdev) ||
2270                    cp.simul != lmp_host_le_br_capable(hdev))
2271                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2272                                    sizeof(cp), &cp);
2273        }
2274
2275        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2276                /* Make sure the controller has a good default for
2277                 * advertising data. This also applies to the case
2278                 * where BR/EDR was toggled during the AUTO_OFF phase.
2279                 */
2280                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2281                    list_empty(&hdev->adv_instances)) {
2282                        __hci_req_update_adv_data(req, 0x00);
2283                        __hci_req_update_scan_rsp_data(req, 0x00);
2284
2285                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2286                                __hci_req_enable_advertising(req);
2287                } else if (!list_empty(&hdev->adv_instances)) {
2288                        struct adv_info *adv_instance;
2289
2290                        adv_instance = list_first_entry(&hdev->adv_instances,
2291                                                        struct adv_info, list);
2292                        __hci_req_schedule_adv_instance(req,
2293                                                        adv_instance->instance,
2294                                                        true);
2295                }
2296        }
2297
2298        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2299        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2300                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2301                            sizeof(link_sec), &link_sec);
2302
2303        if (lmp_bredr_capable(hdev)) {
2304                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2305                        __hci_req_write_fast_connectable(req, true);
2306                else
2307                        __hci_req_write_fast_connectable(req, false);
2308                __hci_req_update_scan(req);
2309                __hci_req_update_class(req);
2310                __hci_req_update_name(req);
2311                __hci_req_update_eir(req);
2312        }
2313
2314        hci_dev_unlock(hdev);
2315        return 0;
2316}
2317
2318int __hci_req_hci_power_on(struct hci_dev *hdev)
2319{
2320        /* Register the available SMP channels (BR/EDR and LE) only when
2321         * successfully powering on the controller. This late
2322         * registration is required so that LE SMP can clearly decide if
2323         * the public address or static address is used.
2324         */
2325        smp_register(hdev);
2326
2327        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2328                              NULL);
2329}
2330
2331void hci_request_setup(struct hci_dev *hdev)
2332{
2333        INIT_WORK(&hdev->discov_update, discov_update);
2334        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2335        INIT_WORK(&hdev->scan_update, scan_update_work);
2336        INIT_WORK(&hdev->connectable_update, connectable_update_work);
2337        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2338        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2339        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2340        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2341        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2342}
2343
2344void hci_request_cancel_all(struct hci_dev *hdev)
2345{
2346        hci_req_sync_cancel(hdev, ENODEV);
2347
2348        cancel_work_sync(&hdev->discov_update);
2349        cancel_work_sync(&hdev->bg_scan_update);
2350        cancel_work_sync(&hdev->scan_update);
2351        cancel_work_sync(&hdev->connectable_update);
2352        cancel_work_sync(&hdev->discoverable_update);
2353        cancel_delayed_work_sync(&hdev->discov_off);
2354        cancel_delayed_work_sync(&hdev->le_scan_disable);
2355        cancel_delayed_work_sync(&hdev->le_scan_restart);
2356
2357        if (hdev->adv_instance_timeout) {
2358                cancel_delayed_work_sync(&hdev->adv_instance_expire);
2359                hdev->adv_instance_timeout = 0;
2360        }
2361}
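
/* Expected pairing of these helpers (a sketch, assuming the usual
 * hci_core.c call sites):
 *
 *        hci_request_setup(hdev);        - once, when the device is allocated
 *        ...
 *        hci_request_cancel_all(hdev);   - when the device goes down
 */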
2362