linux/net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
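
/* Usage sketch (hypothetical caller, not part of this file): build a
 * request, queue one or more commands and submit them asynchronously.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	if (hci_req_run(&req, my_complete_cb) < 0)
 *		hci_req_purge(&req);
 *
 * my_complete_cb is a placeholder hci_req_complete_t callback; it is
 * attached to the last queued command and invoked once the whole
 * request has been processed.
 */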

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        bt_dev_dbg(hdev, "");

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        bt_dev_dbg(hdev, "end: err %d", err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
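
/* Example (hypothetical caller): issue a single command synchronously
 * and consume the returned event skb. HCI_CMD_TIMEOUT is the timeout
 * commonly used elsewhere in the stack.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 *
 * Callers are expected to serialize these calls, typically by holding
 * the request lock (hci_req_sync_lock()).
 */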

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect the HCI_UP
         * flag against any races from hci_dev_do_close() when the
         * controller gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}
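
/* Sketch of a request builder as passed to hci_req_sync() (the names
 * below are made up for illustration):
 *
 *	static int scan_disable_req(struct hci_request *req,
 *				    unsigned long opt)
 *	{
 *		hci_req_add_le_scan_disable(req, false);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, scan_disable_req, 0, HCI_CMD_TIMEOUT,
 *			   &status);
 *
 * Returning non-zero from the builder aborts the request before any
 * command is sent.
 */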

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}
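
/* The resulting skb carries a standard HCI command packet:
 *
 *	+---------------+------+---------------------+
 *	| opcode (le16) | plen | plen bytes of param |
 *	+---------------+------+---------------------+
 *
 * hci_skb_pkt_type() marks it as HCI_COMMAND_PKT so the core transmit
 * path treats it as a command when sending it to the controller.
 */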

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval (0x0100 * 0.625 ms units) */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if this function starts interleaved scanning (i.e. it was
 * not running on entry); otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

/* This function controls background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * make sure to set proper values for the RSSI threshold and
         * UUID filter list, so it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        bt_dev_dbg(hdev, "ADV monitoring is %s",
                   hci_is_adv_monitoring(hdev) ? "on" : "off");

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports) &&
            !hci_is_adv_monitoring(hdev)) {
                /* If there are no pending LE connections, no devices to
                 * be scanned for and no ADV monitors, we should stop the
                 * background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req, false);

                bt_dev_dbg(hdev, "stopping background scanning");
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req, false);

                hci_req_add_le_passive_scan(req);
                bt_dev_dbg(hdev, "starting background scanning");
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
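
/* Each field written by create_eir() follows the EIR/AD structure
 * layout from the Core specification:
 *
 *	+-----+------+----------------------+
 *	| len | type | data (len - 1 bytes) |
 *	+-----+------+----------------------+
 *
 * len counts the type octet plus the data, which is why the UUID list
 * builders above start each field with a length of 1 before appending
 * UUIDs to it.
 */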

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (hdev->suspended)
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
                                 u8 bdaddr_type)
{
        struct hci_cp_le_del_from_accept_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev) &&
            hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
                              struct hci_conn_params *params, u8 *num_entries,
                              bool allow_rpa)
{
        struct hci_cp_le_add_to_accept_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in accept list */
        if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Select filter policy to accept all advertising */
        if (*num_entries >= hdev->le_accept_list_size)
                return -1;

        /* Accept list can not be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                                   params->current_flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow usage of the accept list even with RPAs in suspend. In
         * the worst case, we won't be able to wake from devices that use
         * the Privacy 1.2 features. Additionally, once we support
         * Privacy 1.2 and IRK offloading, we can update this to also
         * check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
                allow_rpa = true;

        /* Go through the current accept list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_accept_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the accept list.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* Accept list can not be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no longer valid accept list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available accept list entries in the controller, then
         * just abort and return the filter policy value to not use
         * the accept list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * accept list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - One or more ADV monitors are registered and offloading is not
         *   supported
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use accept list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool filter_dup, bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the Set Extended Scan Parameters and
         * Set Extended Scan Enable commands are supported.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = filter_dup;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = filter_dup;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}
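
/* Note on units: the interval and window values programmed above are in
 * controller units of 0.625 ms, so e.g. an interval of 0x0060 means
 * 96 * 0.625 ms = 60 ms. The window must not exceed the interval.
 */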

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Call hci_req_add_le_scan_disable() first to disable the controller
 * based address resolution before the resolving list can be
 * reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Default is to enable duplicates filter */
        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the accept list must
         * happen before enabling scanning. The controller does
         * not allow accept list modification while scanning.
         */
        filter_policy = update_accept_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no accept list)
         * and 0x01 (accept list enabled) use the new filter policies
         * 0x02 (no accept list) and 0x03 (accept list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;

                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;

                /* Disable duplicates filter when scanning for advertisement
                 * monitor for the following reasons.
                 *
                 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
                 * controllers ignore RSSI_Sampling_Period when the duplicates
                 * filter is enabled.
                 *
                 * For SW pattern filtering, when we're not doing interleaved
                 * scanning, it is necessary to disable duplicates filter,
                 * otherwise hosts can only receive one advertisement and it's
                 * impossible to know if a peer is still in range.
                 */
                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
                   filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, filter_dup,
                           addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return true;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return false;

        if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
            adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
                return true;

        return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
        struct hci_cp_set_event_filter f;

        if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
                memset(&f, 0, sizeof(f));
                f.flt_type = HCI_FLT_CLEAR_ALL;
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
        }
}

static void hci_req_set_event_filter(struct hci_request *req)
{
        struct bdaddr_list_with_flags *b;
        struct hci_cp_set_event_filter f;
        struct hci_dev *hdev = req->hdev;
        u8 scan = SCAN_DISABLED;
        bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        /* Always clear event filter when starting */
        hci_req_clear_event_filter(req);

        list_for_each_entry(b, &hdev->accept_list, list) {
                if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                        b->current_flags))
                        continue;

                memset(&f, 0, sizeof(f));
                bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
                f.flt_type = HCI_FLT_CONN_SETUP;
                f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
                f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
                scan = SCAN_PAGE;
        }

        if (scan && !scanning) {
                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        } else if (!scan && scanning) {
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
        bt_dev_dbg(req->hdev, "Pausing advertising instances");

        /* Call to disable any advertisements active on the controller.
         * This will succeed even if no advertisements are configured.
         */
        __hci_req_disable_advertising(req);

        /* If we are using software rotation, pause the loop */
        if (!ext_adv_capable(req->hdev))
                cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
        struct adv_info *adv;

        bt_dev_dbg(req->hdev, "Resuming advertising instances");

        if (ext_adv_capable(req->hdev)) {
                /* Call for each tracked instance to be re-enabled */
                list_for_each_entry(adv, &req->hdev->adv_instances, list) {
                        __hci_req_enable_ext_advertising(req,
                                                         adv->instance);
                }

        } else {
                /* Schedule for most recent instance to be restarted and begin
                 * the software rotation loop
                 */
                __hci_req_schedule_adv_instance(req,
                                                req->hdev->cur_adv_instance,
                                                true);
        }
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_resume_adv_instances(&req);

        return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
        if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
            test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }

        if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }
}

static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
                                              bool enable)
{
        struct hci_dev *hdev = req->hdev;

        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_MSFT:
                msft_req_add_set_filter_enable(req, enable);
                break;
        default:
                return;
        }

        /* No need to block when enabling since it's on resume path */
        if (hdev->suspended && !enable)
                set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
        int old_state;
        struct hci_conn *conn;
        struct hci_request req;
        u8 page_scan;
        int disconnect_counter;

        if (next == hdev->suspend_state) {
                bt_dev_dbg(hdev, "Same state before and after: %d", next);
                goto done;
        }

        hdev->suspend_state = next;
        hci_req_init(&req, hdev);

        if (next == BT_SUSPEND_DISCONNECT) {
                /* Mark device as suspended */
                hdev->suspended = true;

                /* Pause discovery if not already stopped */
                old_state = hdev->discovery.state;
                if (old_state != DISCOVERY_STOPPED) {
                        set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hdev->discovery_paused = true;
                hdev->discovery_old_state = old_state;

                /* Stop directed advertising */
                old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
                if (old_state) {
                        set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
                        cancel_delayed_work(&hdev->discov_off);
                        queue_delayed_work(hdev->req_workqueue,
                                           &hdev->discov_off, 0);
                }

                /* Pause other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_pause_adv_instances(&req);

                hdev->advertising_paused = true;
                hdev->advertising_old_state = old_state;

                /* Disable page scan if enabled */
                if (test_bit(HCI_PSCAN, &hdev->flags)) {
                        page_scan = SCAN_DISABLED;
                        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
                                    &page_scan);
                        set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                }

                /* Disable LE passive scan if enabled */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_interleave_scan(hdev);
                        hci_req_add_le_scan_disable(&req, false);
                }

                /* Disable advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, false);

                /* Prevent disconnects from causing scanning to be re-enabled */
                hdev->scanning_paused = true;

                /* Run commands before disconnecting */
                hci_req_run(&req, suspend_req_complete);

                disconnect_counter = 0;
                /* Soft disconnect everything (power off) */
                list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                        hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
                        disconnect_counter++;
                }

                if (disconnect_counter > 0) {
                        bt_dev_dbg(hdev,
                                   "Had %d disconnects. Will wait on them",
                                   disconnect_counter);
                        set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
                }
        } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
                /* Unpause to take care of updating scanning params */
                hdev->scanning_paused = false;
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
                __hci_update_background_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
        } else {
                hdev->suspended = false;
                hdev->scanning_paused = false;

                /* Clear any event filters and restore scan state */
                hci_req_clear_event_filter(&req);
                __hci_req_update_scan(&req);

                /* Reset passive/background scanning to normal */
                __hci_update_background_scan(&req);
                /* Enable all of the advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, true);

                /* Unpause directed advertising */
                hdev->advertising_paused = false;
                if (hdev->advertising_old_state) {
                        set_bit(SUSPEND_UNPAUSE_ADVERTISING,
                                hdev->suspend_tasks);
                        hci_dev_set_flag(hdev, HCI_ADVERTISING);
                        queue_work(hdev->req_workqueue,
                                   &hdev->discoverable_update);
                        hdev->advertising_old_state = 0;
                }

                /* Resume other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_resume_adv_instances(&req);

                /* Unpause discovery */
                hdev->discovery_paused = false;
                if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
                    hdev->discovery_old_state != DISCOVERY_STOPPING) {
                        set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STARTING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hci_req_run(&req, suspend_req_complete);
        }

        hdev->suspend_state = next;

done:
        clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
        wake_up(&hdev->suspend_wait_q);
}
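
/* Summary of the state machine above:
 *
 *	BT_SUSPEND_DISCONNECT     - pause discovery and advertising, turn
 *				    off scanning and disconnect all links
 *	BT_SUSPEND_CONFIGURE_WAKE - program event filters and a passive
 *				    scan so wakeable devices can reach us
 *	any other state (resume)  - clear the filters and restore scan,
 *				    advertising and discovery state
 */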
1442
1443static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1444{
1445        return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1446}
1447
void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);
        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

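/* Determine the advertising flags of a given instance.  Instance 0x00
 * (the default instance) derives its flags from the global mgmt
 * settings, while any other instance returns the flags it was
 * registered with.
 */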
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

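/* Check the le_states bitmask reported by the controller (LE Read
 * Supported States) to determine whether advertising of the requested
 * type is allowed while LE connections in peripheral and/or central
 * role exist.
 */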
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in peripheral role. */
        if (hdev->conn_hash.le_num_peripheral > 0) {
                /* Peripheral connection state and non connectable mode bit 20.
                 */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Peripheral connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in central role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
                /* Central connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Central connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

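/* Configure and enable legacy advertising for the current instance:
 * pick the own address type, then queue LE Set Advertising Parameters
 * followed by LE Set Advertising Enable.
 */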
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u16 adv_min_interval, adv_max_interval;
        u32 flags;

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
        adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));

        if (adv_instance) {
                adv_min_interval = adv_instance->min_interval;
                adv_max_interval = adv_instance->max_interval;
        } else {
                adv_min_interval = hdev->le_adv_min_interval;
                adv_max_interval = hdev->le_adv_max_interval;
        }

        if (connectable) {
                cp.type = LE_ADV_IND;
        } else {
                if (adv_cur_instance_is_scannable(hdev))
                        cp.type = LE_ADV_SCAN_IND;
                else
                        cp.type = LE_ADV_NONCONN_IND;

                if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
                    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                        adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
                        adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
                }
        }

        cp.min_interval = cpu_to_le16(adv_min_interval);
        cp.max_interval = cpu_to_le16(adv_max_interval);
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

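/* Append the local name to the AD/EIR buffer at ptr: the complete
 * name if it fits, otherwise the configured short name, otherwise a
 * truncated copy of the complete name.  Returns the updated length.
 */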
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance)
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

        return append_local_name(hdev, ptr, scan_rsp_len);
}

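/* Build the scan response payload for a specific advertising
 * instance, prepending the appearance and appending the local name
 * when the instance flags request them.
 */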
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

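/* Push new scan response data to the controller when it differs from
 * the cached copy, using the extended or legacy variant of the
 * command depending on the controller's capabilities.
 */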
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_scan_rsp_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            pdu.data);
                else
                        len = create_default_scan_rsp_data(hdev, pdu.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(pdu.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, pdu.data, len);
                hdev->scan_rsp_data_len = len;

                pdu.cp.handle = instance;
                pdu.cp.length = len;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

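/* Build the advertising data payload for the given instance: the
 * "Flags" AD field (unless the instance supplies its own), the
 * instance data itself and, when requested and valid, the Tx power.
 */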
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* If the instance already has the flags set, skip adding them
         * again.
         */
        if (adv_instance && eir_get_data(adv_instance->adv_data,
                                         adv_instance->adv_data_len, EIR_FLAGS,
                                         NULL))
                goto skip_flags;

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

skip_flags:
        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
                s8 adv_tx_power;

                if (ext_adv_capable(hdev)) {
                        if (adv_instance)
                                adv_tx_power = adv_instance->tx_power;
                        else
                                adv_tx_power = hdev->adv_tx_power;
                } else {
                        adv_tx_power = hdev->adv_tx_power;
                }

                /* Provide Tx Power only if we can provide a valid value for it */
                if (adv_tx_power != HCI_TX_POWER_INVALID) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_TX_POWER;
                        ptr[2] = (u8)adv_tx_power;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        return ad_len;
}

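/* Push new advertising data to the controller when it differs from
 * the cached copy, mirroring __hci_req_update_scan_rsp_data() above.
 */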
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_adv_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                len = create_instance_adv_data(hdev, instance, pdu.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(pdu.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, pdu.data, len);
                hdev->adv_data_len = len;

                pdu.cp.length = len;
                pdu.cp.handle = instance;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_adv_data cp;

                memset(&cp, 0, sizeof(cp));

                len = create_instance_adv_data(hdev, instance, cp.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(cp.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
                hdev->adv_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
        }
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
                                            u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
        struct hci_request req;
        __u8 enable = 0x00;

        if (!use_ll_privacy(hdev) &&
            !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
                return;

        hci_req_init(&req, hdev);

        hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

        hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "status %u", status);
}

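/* Restart advertising, resuming either the currently selected
 * instance or, if none is set, the default instance 0x00.
 */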
void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                if (ext_adv_capable(hdev)) {
                        __hci_req_start_ext_adv(&req, 0x00);
                } else {
                        __hci_req_update_adv_data(&req, 0x00);
                        __hci_req_update_scan_rsp_data(&req, 0x00);
                        __hci_req_enable_advertising(&req);
                }
        }

        hci_req_run(&req, adv_enable_complete);
}

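/* Work callback run when the advertising duration of the current
 * instance expires: clear the instance and, if it was the last one,
 * disable advertising altogether.
 */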
static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

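/* Restart passive scanning and flip the interleave state between
 * allowlist-only and unfiltered scanning.  interleave_scan_work()
 * below re-queues itself with the matching advmon_* duration for as
 * long as interleave scanning stays enabled.
 */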
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
                                           unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        int ret = 0;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req, false);
        hci_req_add_le_passive_scan(req);

        switch (hdev->interleave_scan_state) {
        case INTERLEAVE_SCAN_ALLOWLIST:
                bt_dev_dbg(hdev, "next state: allowlist");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
                break;
        case INTERLEAVE_SCAN_NO_FILTER:
                bt_dev_dbg(hdev, "next state: no filter");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
                break;
        case INTERLEAVE_SCAN_NONE:
                BT_ERR("unexpected error");
                ret = -1;
        }

        hci_dev_unlock(hdev);

        return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            interleave_scan.work);
        u8 status;
        unsigned long timeout;

        if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
                timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
        } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
                timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
        } else {
                bt_dev_err(hdev, "unexpected error");
                return;
        }

        hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
                     HCI_CMD_TIMEOUT, &status);

        /* Don't continue interleaving if it was canceled */
        if (is_interleave_scanning(hdev))
                queue_delayed_work(hdev->req_workqueue,
                                   &hdev->interleave_scan, timeout);
}

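/* Select the own address type and, where needed, the random address
 * for an advertising instance: an RPA when privacy is enabled, a
 * fresh NRPA when privacy is required but resolvability is not, and
 * the public address otherwise.  rand_addr is left as BDADDR_ANY when
 * no random address has to be programmed.
 */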
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
                           bool use_rpa, struct adv_info *adv_instance,
                           u8 *own_addr_type, bdaddr_t *rand_addr)
{
        int err;

        bacpy(rand_addr, BDADDR_ANY);

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired then generate a new one.
         */
        if (use_rpa) {
                int to;

                /* If the Controller supports LL Privacy, use own address
                 * type 0x03
                 */
                if (use_ll_privacy(hdev) &&
                    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
                        *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
                else
                        *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (adv_instance) {
                        if (!adv_instance->rpa_expired &&
                            !bacmp(&adv_instance->random_addr, &hdev->rpa))
                                return 0;

                        adv_instance->rpa_expired = false;
                } else {
                        if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                            !bacmp(&hdev->random_addr, &hdev->rpa))
                                return 0;
                }

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                bacpy(rand_addr, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                if (adv_instance)
                        queue_delayed_work(hdev->workqueue,
                                           &adv_instance->rpa_expired_cb, to);
                else
                        queue_delayed_work(hdev->workqueue,
                                           &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for
         * non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                bacpy(rand_addr, &nrpa);

                return 0;
        }

        /* No privacy so use a public address. */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
        hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

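/* Program the parameters of one extended advertising set: event
 * properties, intervals, PHYs and own address type are derived from
 * the instance flags, and the set's random address is updated when
 * one is required.
 */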
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
        struct hci_cp_le_set_ext_adv_params cp;
        struct hci_dev *hdev = req->hdev;
        bool connectable;
        u32 flags;
        bdaddr_t random_addr;
        u8 own_addr_type;
        int err;
        struct adv_info *adv_instance;
        bool secondary_adv;

        if (instance > 0) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return -EINVAL;
        } else {
                adv_instance = NULL;
        }

        flags = get_adv_instance_flags(hdev, instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return -EPERM;

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        err = hci_get_random_address(hdev, !connectable,
                                     adv_use_rpa(hdev, flags), adv_instance,
                                     &own_addr_type, &random_addr);
        if (err < 0)
                return err;

        memset(&cp, 0, sizeof(cp));

        if (adv_instance) {
                hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
                hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
                cp.tx_power = adv_instance->tx_power;
        } else {
                hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
                hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
                cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
        }

        secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

        if (connectable) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
        } else if (adv_instance_is_scannable(hdev, instance) ||
                   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
        } else {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
        }

        cp.own_addr_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;
        cp.handle = instance;

        if (flags & MGMT_ADV_FLAG_SEC_2M) {
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_2M;
        } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
                cp.primary_phy = HCI_ADV_PHY_CODED;
                cp.secondary_phy = HCI_ADV_PHY_CODED;
        } else {
                /* In all other cases use 1M */
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_1M;
        }

        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

        if (own_addr_type == ADDR_LE_DEV_RANDOM &&
            bacmp(&random_addr, BDADDR_ANY)) {
                struct hci_cp_le_set_adv_set_rand_addr cp;

                /* Check if the random address needs to be updated */
                if (adv_instance) {
                        if (!bacmp(&random_addr, &adv_instance->random_addr))
                                return 0;
                } else {
                        if (!bacmp(&random_addr, &hdev->random_addr))
                                return 0;
                }

                memset(&cp, 0, sizeof(cp));

                cp.handle = instance;
                bacpy(&cp.bdaddr, &random_addr);

                hci_req_add(req,
                            HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
                            sizeof(cp), &cp);
        }

        return 0;
}

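/* Enable a single extended advertising set.  Unlike legacy
 * advertising the duration is passed to the controller, which takes
 * care of the scheduling itself.
 */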
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *adv_set;
        u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
        struct adv_info *adv_instance;

        if (instance > 0) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return -EINVAL;
        } else {
                adv_instance = NULL;
        }

        cp = (void *) data;
        adv_set = (void *) cp->data;

        memset(cp, 0, sizeof(*cp));

        cp->enable = 0x01;
        cp->num_of_sets = 0x01;

        memset(adv_set, 0, sizeof(*adv_set));

        adv_set->handle = instance;

        /* Set duration per instance since controller is responsible for
         * scheduling it.
         */
        if (adv_instance && adv_instance->duration) {
                u16 duration = adv_instance->duration * MSEC_PER_SEC;

                /* Time = N * 10 ms */
                adv_set->duration = cpu_to_le16(duration / 10);
        }

        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
                    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
                    data);

        return 0;
}

int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *adv_set;
        u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
        u8 req_size;

        /* If request specifies an instance that doesn't exist, fail */
        if (instance > 0 && !hci_find_adv_instance(hdev, instance))
                return -EINVAL;

        memset(data, 0, sizeof(data));

        cp = (void *)data;
        adv_set = (void *)cp->data;

        /* Instance 0x00 indicates all advertising instances will be disabled */
        cp->num_of_sets = !!instance;
        cp->enable = 0x00;

        adv_set->handle = instance;

        req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);

        return 0;
}

int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;

        /* If request specifies an instance that doesn't exist, fail */
        if (instance > 0 && !hci_find_adv_instance(hdev, instance))
                return -EINVAL;

        hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);

        return 0;
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
        int err;

        /* If instance isn't pending, the chip knows about it, and it's safe to
         * disable
         */
        if (adv_instance && !adv_instance->pending)
                __hci_req_disable_ext_adv_instance(req, instance);

        err = __hci_req_setup_ext_adv_instance(req, instance);
        if (err < 0)
                return err;

        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_ext_advertising(req, instance);

        return 0;
}

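/* Make the given instance the advertised one.  Computes how long the
 * instance may still advertise, arms the expiry work for legacy
 * controllers and issues the necessary HCI commands unless the same
 * instance is already active.
 */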
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        /* Only use work for scheduling instances with legacy advertising */
        if (!ext_adv_capable(hdev)) {
                hdev->adv_instance_timeout = timeout;
                queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));
        }

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        if (ext_adv_capable(hdev)) {
                __hci_req_start_ext_adv(req, instance);
        } else {
                __hci_req_update_adv_data(req, instance);
                __hci_req_update_scan_rsp_data(req, instance);
                __hci_req_enable_advertising(req);
        }

        return 0;
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance && !ext_adv_capable(hdev))
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                bt_dev_dbg(hdev, "Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

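/* Select the own address type for advertising, scanning or initiating
 * and, when necessary, queue an update of the controller's random
 * address.  Callers follow the pattern used by
 * __hci_req_enable_advertising() above: check the result and abandon
 * the request on error.
 */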
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                /* If the Controller supports LL Privacy, use own address
                 * type 0x03
                 */
                if (use_ll_privacy(hdev) &&
                    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
                        *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
                else
                        *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->accept_list, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

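/* Bring the BR/EDR page and inquiry scan settings in line with the
 * current connectable/discoverable state.  Page scan is kept enabled
 * while accept list entries still need it to reconnect.
 */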
void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hdev->scanning_paused)
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_accept_list_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void scan_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        __hci_req_update_scan(req);

        /* If BR/EDR is not enabled and we disable advertising as a
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                __hci_req_update_adv_data(req, hdev->cur_adv_instance);

        /* Update the advertising parameters if necessary */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            !list_empty(&hdev->adv_instances)) {
                if (ext_adv_capable(hdev))
                        __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
                else
                        __hci_req_enable_advertising(req);
        }

        __hci_update_background_scan(req);

        hci_dev_unlock(hdev);

        return 0;
}

static void connectable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            connectable_update);
        u8 status;

        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

void __hci_req_update_class(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 cod[3];

        bt_dev_dbg(hdev, "");

        if (!hdev_is_powered(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;

        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

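/* Program the current Inquiry Access Codes: LIAC plus GIAC in limited
 * discoverable mode, GIAC alone in general discoverable mode.
 */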
static void write_iac(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_current_iac_lap cp;

        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                return;

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                /* Limited discoverable mode */
                cp.num_iac = min_t(u8, hdev->num_iac, 2);
                cp.iac_lap[0] = 0x00;   /* LIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
                cp.iac_lap[3] = 0x33;   /* GIAC */
                cp.iac_lap[4] = 0x8b;
                cp.iac_lap[5] = 0x9e;
        } else {
                /* General discoverable mode */
                cp.num_iac = 1;
                cp.iac_lap[0] = 0x33;   /* GIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
        }

        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
                    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                write_iac(req);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
        }

        /* Advertising instances don't use the global discoverable setting, so
         * only update AD if advertising was enabled using Set Advertising.
         */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                __hci_req_update_adv_data(req, 0x00);

                /* Discoverable mode affects the local advertising
                 * address in limited privacy mode.
                 */
                if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
                        if (ext_adv_capable(hdev))
                                __hci_req_start_ext_adv(req, 0x00);
                        else
                                __hci_req_enable_advertising(req);
                }
        }

        hci_dev_unlock(hdev);

        return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discoverable_update);
        u8 status;

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_discoverable_complete(hdev, status);
}

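/* Queue the HCI command appropriate for tearing down a connection in
 * its current state: disconnect established links, cancel pending
 * connection attempts and reject incoming connection requests.
 */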
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F) which isn't
                         * compatible with most values passed to this
                         * function. To be safe hard-code one of the
                         * values that's suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

2923static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2924{
2925        if (status)
2926                bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2927}
2928
2929int hci_abort_conn(struct hci_conn *conn, u8 reason)
2930{
2931        struct hci_request req;
2932        int err;
2933
2934        hci_req_init(&req, conn->hdev);
2935
2936        __hci_abort_conn(&req, conn, reason);
2937
2938        err = hci_req_run(&req, abort_conn_complete);
2939        if (err && err != -ENODATA) {
2940                bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2941                return err;
2942        }
2943
2944        return 0;
2945}
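
/* Illustrative sketch (hypothetical, not part of the original file): a
 * caller aborting a connection with the common "remote user terminated
 * connection" reason. Assumes the caller holds a valid conn reference;
 * the example_* name is invented for illustration.
 */
static inline int example_abort_user_term(struct hci_conn *conn)
{
        /* HCI_ERROR_REMOTE_USER_TERM (0x13) is defined in hci.h */
        return hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}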
2946
2947static int update_bg_scan(struct hci_request *req, unsigned long opt)
2948{
2949        hci_dev_lock(req->hdev);
2950        __hci_update_background_scan(req);
2951        hci_dev_unlock(req->hdev);
2952        return 0;
2953}
2954
2955static void bg_scan_update(struct work_struct *work)
2956{
2957        struct hci_dev *hdev = container_of(work, struct hci_dev,
2958                                            bg_scan_update);
2959        struct hci_conn *conn;
2960        u8 status;
2961        int err;
2962
2963        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2964        if (!err)
2965                return;
2966
2967        hci_dev_lock(hdev);
2968
2969        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2970        if (conn)
2971                hci_le_conn_failed(conn, status);
2972
2973        hci_dev_unlock(hdev);
2974}
2975
2976static int le_scan_disable(struct hci_request *req, unsigned long opt)
2977{
2978        hci_req_add_le_scan_disable(req, false);
2979        return 0;
2980}
2981
2982static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2983{
2984        u8 length = opt;
2985        const u8 giac[3] = { 0x33, 0x8b, 0x9e }; /* GIAC LAP 0x9e8b33, little-endian */
2986        const u8 liac[3] = { 0x00, 0x8b, 0x9e }; /* LIAC LAP 0x9e8b00, little-endian */
2987        struct hci_cp_inquiry cp;
2988
2989        if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2990                return 0;
2991
2992        bt_dev_dbg(req->hdev, "");
2993
2994        hci_dev_lock(req->hdev);
2995        hci_inquiry_cache_flush(req->hdev);
2996        hci_dev_unlock(req->hdev);
2997
2998        memset(&cp, 0, sizeof(cp));
2999
3000        if (req->hdev->discovery.limited)
3001                memcpy(&cp.lap, liac, sizeof(cp.lap));
3002        else
3003                memcpy(&cp.lap, giac, sizeof(cp.lap));
3004
3005        cp.length = length;
3006
3007        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3008
3009        return 0;
3010}
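
/* Sketch (hypothetical helper, not in the original file): the Inquiry
 * 'length' parameter is expressed in units of 1.28 seconds per the
 * Bluetooth Core Specification, so converting a duration in seconds to
 * the command unit could look like this; example_* is an invented name.
 */
static inline u8 example_inquiry_length(unsigned int secs)
{
        /* length = ceil(secs / 1.28) = ceil(secs * 100 / 128) */
        return (u8)DIV_ROUND_UP(secs * 100, 128);
}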
3011
3012static void le_scan_disable_work(struct work_struct *work)
3013{
3014        struct hci_dev *hdev = container_of(work, struct hci_dev,
3015                                            le_scan_disable.work);
3016        u8 status;
3017
3018        bt_dev_dbg(hdev, "");
3019
3020        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3021                return;
3022
3023        cancel_delayed_work(&hdev->le_scan_restart);
3024
3025        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3026        if (status) {
3027                bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3028                           status);
3029                return;
3030        }
3031
3032        hdev->discovery.scan_start = 0;
3033
3034        /* If we were running an LE-only scan, change the discovery state.
3035         * If we were running both LE scanning and BR/EDR inquiry
3036         * simultaneously and the BR/EDR inquiry has already finished,
3037         * stop discovery; otherwise the BR/EDR inquiry will stop
3038         * discovery when it finishes. If a remote device name is being
3039         * resolved, do not change the discovery state.
3040         */
3041
3042        if (hdev->discovery.type == DISCOV_TYPE_LE)
3043                goto discov_stopped;
3044
3045        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3046                return;
3047
3048        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3049                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3050                    hdev->discovery.state != DISCOVERY_RESOLVING)
3051                        goto discov_stopped;
3052
3053                return;
3054        }
3055
3056        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3057                     HCI_CMD_TIMEOUT, &status);
3058        if (status) {
3059                bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3060                goto discov_stopped;
3061        }
3062
3063        return;
3064
3065discov_stopped:
3066        hci_dev_lock(hdev);
3067        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3068        hci_dev_unlock(hdev);
3069}
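
/* Added note (annotation): on DISCOV_TYPE_INTERLEAVED without
 * HCI_QUIRK_SIMULTANEOUS_DISCOVERY, the function above chains a BR/EDR
 * inquiry of DISCOV_INTERLEAVED_INQUIRY_LEN after disabling the LE scan,
 * so discovery is only stopped once that inquiry completes.
 */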
3070
3071static int le_scan_restart(struct hci_request *req, unsigned long opt)
3072{
3073        struct hci_dev *hdev = req->hdev;
3074
3075        /* If the controller is not scanning, we are done. */
3076        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3077                return 0;
3078
3079        if (hdev->scanning_paused) {
3080                bt_dev_dbg(hdev, "Scanning is paused for suspend");
3081                return 0;
3082        }
3083
3084        hci_req_add_le_scan_disable(req, false);
3085
3086        if (use_ext_scan(hdev)) {
3087                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3088
3089                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3090                ext_enable_cp.enable = LE_SCAN_ENABLE;
3091                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3092
3093                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3094                            sizeof(ext_enable_cp), &ext_enable_cp);
3095        } else {
3096                struct hci_cp_le_set_scan_enable cp;
3097
3098                memset(&cp, 0, sizeof(cp));
3099                cp.enable = LE_SCAN_ENABLE;
3100                cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3101                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3102        }
3103
3104        return 0;
3105}
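
/* Added note (annotation): restarting the scan (the disable above followed
 * by a fresh enable) matters on controllers with
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER: re-enabling with
 * LE_SCAN_FILTER_DUP_ENABLE resets the controller's duplicate filter, so
 * devices that were already reported once can be reported again.
 */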
3106
3107static void le_scan_restart_work(struct work_struct *work)
3108{
3109        struct hci_dev *hdev = container_of(work, struct hci_dev,
3110                                            le_scan_restart.work);
3111        unsigned long timeout, duration, scan_start, now;
3112        u8 status;
3113
3114        bt_dev_dbg(hdev, "");
3115
3116        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3117        if (status) {
3118                bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
3119                           status);
3120                return;
3121        }
3122
3123        hci_dev_lock(hdev);
3124
3125        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3126            !hdev->discovery.scan_start)
3127                goto unlock;
3128
3129        /* When the scan was started, hdev->le_scan_disable was queued to
3130         * run 'duration' after scan_start. The scan restart canceled that
3131         * work, so it must be queued again with the remaining timeout to
3132         * make sure the scan does not run indefinitely.
3133         */
3134        duration = hdev->discovery.scan_duration;
3135        scan_start = hdev->discovery.scan_start;
3136        now = jiffies;
3137        if (now - scan_start <= duration) {
3138                int elapsed;
3139                /* elapsed since scan_start, allowing for jiffies wraparound */
3140                if (now >= scan_start)
3141                        elapsed = now - scan_start;
3142                else
3143                        elapsed = ULONG_MAX - scan_start + now;
3144
3145                timeout = duration - elapsed;
3146        } else {
3147                timeout = 0;
3148        }
3149
3150        queue_delayed_work(hdev->req_workqueue,
3151                           &hdev->le_scan_disable, timeout);
3152
3153unlock:
3154        hci_dev_unlock(hdev);
3155}
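
/* Sketch (hypothetical helper, not in the original file): the remaining
 * timeout computed above, factored out. Since jiffies arithmetic is
 * unsigned, 'now - start' is already wrap-safe, so the explicit wraparound
 * branch above could arguably be simplified as sketched here.
 */
static inline unsigned long example_scan_timeout_left(unsigned long start,
                                                      unsigned long duration,
                                                      unsigned long now)
{
        unsigned long elapsed = now - start;    /* wrap-safe subtraction */

        return elapsed <= duration ? duration - elapsed : 0;
}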
3156
3157static int active_scan(struct hci_request *req, unsigned long opt)
3158{
3159        u16 interval = opt;
3160        struct hci_dev *hdev = req->hdev;
3161        u8 own_addr_type;
3162        /* Accept list is not used for discovery */
3163        u8 filter_policy = 0x00;
3164        /* Default is to enable duplicates filter */
3165        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3166        /* Discovery doesn't require controller address resolution */
3167        bool addr_resolv = false;
3168        int err;
3169
3170        bt_dev_dbg(hdev, "");
3171
3172        /* If the controller is scanning, background scanning is running.
3173         * Temporarily stop it in order to set the discovery scanning
3174         * parameters.
3175         */
3176        if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3177                hci_req_add_le_scan_disable(req, false);
3178                cancel_interleave_scan(hdev);
3179        }
3180
3181        /* All active scans will be done with either a resolvable private
3182         * address (when privacy feature has been enabled) or non-resolvable
3183         * private address.
3184         */
3185        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3186                                        &own_addr_type);
3187        if (err < 0)
3188                own_addr_type = ADDR_LE_DEV_PUBLIC;
3189
3190        if (hci_is_adv_monitoring(hdev)) {
3191                /* The duplicate filter should be disabled when an advertisement
3192                 * monitor is active, otherwise AdvMon can receive only one
3193                 * advertisement per peer during active scanning and might
3194                 * wrongly report loss for those peers.
3195                 *
3196                 * Note that controllers differ in what they consider a
3197                 * "duplicate": some match on address alone, others on both
3198                 * address and RSSI. In the latter case disabling the filter
3199                 * is not strictly needed, but since active scanning usually
3200                 * runs only for a short period, the power impact is
3201                 * negligible.
3202                 */
3204                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3205        }
3206
3207        hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3208                           hdev->le_scan_window_discovery, own_addr_type,
3209                           filter_policy, filter_dup, addr_resolv);
3210        return 0;
3211}
3212
3213static int interleaved_discov(struct hci_request *req, unsigned long opt)
3214{
3215        int err;
3216
3217        bt_dev_dbg(req->hdev, "");
3218
3219        err = active_scan(req, opt);
3220        if (err)
3221                return err;
3222
3223        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3224}
3225
3226static void start_discovery(struct hci_dev *hdev, u8 *status)
3227{
3228        unsigned long timeout;
3229
3230        bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3231
3232        switch (hdev->discovery.type) {
3233        case DISCOV_TYPE_BREDR:
3234                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3235                        hci_req_sync(hdev, bredr_inquiry,
3236                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3237                                     status);
3238                return;
3239        case DISCOV_TYPE_INTERLEAVED:
3240                /* When running simultaneous discovery, the LE scanning time
3241                 * should occupy the whole discovery time since BR/EDR inquiry
3242                 * and LE scanning are scheduled by the controller.
3243                 *
3244                 * For interleaved discovery, in comparison, BR/EDR inquiry
3245                 * and LE scanning are done sequentially with separate
3246                 * timeouts.
3247                 */
3248                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3249                             &hdev->quirks)) {
3250                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3251                        /* During simultaneous discovery, we double LE scan
3252                         * interval. We must leave some time for the controller
3253                         * to do BR/EDR inquiry.
3254                         */
3255                        hci_req_sync(hdev, interleaved_discov,
3256                                     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3257                                     status);
3258                        break;
3259                }
3260
3261                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3262                hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3263                             HCI_CMD_TIMEOUT, status);
3264                break;
3265        case DISCOV_TYPE_LE:
3266                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3267                hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3268                             HCI_CMD_TIMEOUT, status);
3269                break;
3270        default:
3271                *status = HCI_ERROR_UNSPECIFIED;
3272                return;
3273        }
3274
3275        if (*status)
3276                return;
3277
3278        bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3279
3280        /* When service discovery is used and the controller has a
3281         * strict duplicate filter, it is important to remember the
3282         * start and duration of the scan. This is required for
3283         * restarting scanning during the discovery phase.
3284         */
3285        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3286            hdev->discovery.result_filtering) {
3287                hdev->discovery.scan_start = jiffies;
3288                hdev->discovery.scan_duration = timeout;
3289        }
3290
3291        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3292                           timeout);
3293}
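
/* Worked example (annotation, assuming DISCOV_LE_TIMEOUT is 10240 ms as
 * defined in hci_core.h): for DISCOV_TYPE_LE the scan runs for
 * msecs_to_jiffies(10240), i.e. 10.24 seconds, before the queued
 * le_scan_disable work stops it.
 */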
3294
3295bool hci_req_stop_discovery(struct hci_request *req)
3296{
3297        struct hci_dev *hdev = req->hdev;
3298        struct discovery_state *d = &hdev->discovery;
3299        struct hci_cp_remote_name_req_cancel cp;
3300        struct inquiry_entry *e;
3301        bool ret = false;
3302
3303        bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3304
3305        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3306                if (test_bit(HCI_INQUIRY, &hdev->flags))
3307                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3308
3309                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3310                        cancel_delayed_work(&hdev->le_scan_disable);
3311                        cancel_delayed_work(&hdev->le_scan_restart);
3312                        hci_req_add_le_scan_disable(req, false);
3313                }
3314
3315                ret = true;
3316        } else {
3317                /* Passive scanning */
3318                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3319                        hci_req_add_le_scan_disable(req, false);
3320                        ret = true;
3321                }
3322        }
3323
3324        /* No further actions needed for LE-only discovery */
3325        if (d->type == DISCOV_TYPE_LE)
3326                return ret;
3327
3328        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3329                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3330                                                     NAME_PENDING);
3331                if (!e)
3332                        return ret;
3333
3334                bacpy(&cp.bdaddr, &e->data.bdaddr);
3335                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3336                            &cp);
3337                ret = true;
3338        }
3339
3340        return ret;
3341}
3342
3343static int stop_discovery(struct hci_request *req, unsigned long opt)
3344{
3345        hci_dev_lock(req->hdev);
3346        hci_req_stop_discovery(req);
3347        hci_dev_unlock(req->hdev);
3348
3349        return 0;
3350}
3351
3352static void discov_update(struct work_struct *work)
3353{
3354        struct hci_dev *hdev = container_of(work, struct hci_dev,
3355                                            discov_update);
3356        u8 status = 0;
3357
3358        switch (hdev->discovery.state) {
3359        case DISCOVERY_STARTING:
3360                start_discovery(hdev, &status);
3361                mgmt_start_discovery_complete(hdev, status);
3362                if (status)
3363                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3364                else
3365                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3366                break;
3367        case DISCOVERY_STOPPING:
3368                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3369                mgmt_stop_discovery_complete(hdev, status);
3370                if (!status)
3371                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3372                break;
3373        case DISCOVERY_STOPPED:
3374        default:
3375                return;
3376        }
3377}
3378
3379static void discov_off(struct work_struct *work)
3380{
3381        struct hci_dev *hdev = container_of(work, struct hci_dev,
3382                                            discov_off.work);
3383
3384        bt_dev_dbg(hdev, "");
3385
3386        hci_dev_lock(hdev);
3387
3388        /* When the discoverable timeout triggers, make sure the limited
3389         * discoverable flag is cleared. Even for a timeout triggered
3390         * from general discoverable mode, it is safe to clear the flag
3391         * unconditionally.
3392         */
3393        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3394        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3395        hdev->discov_timeout = 0;
3396
3397        hci_dev_unlock(hdev);
3398
3399        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3400        mgmt_new_settings(hdev);
3401}
3402
3403static int powered_update_hci(struct hci_request *req, unsigned long opt)
3404{
3405        struct hci_dev *hdev = req->hdev;
3406        u8 link_sec;
3407
3408        hci_dev_lock(hdev);
3409
3410        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3411            !lmp_host_ssp_capable(hdev)) {
3412                u8 mode = 0x01;
3413
3414                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3415
3416                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3417                        u8 support = 0x01;
3418
3419                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3420                                    sizeof(support), &support);
3421                }
3422        }
3423
3424        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3425            lmp_bredr_capable(hdev)) {
3426                struct hci_cp_write_le_host_supported cp;
3427
3428                cp.le = 0x01;
3429                cp.simul = 0x00;
3430
3431                /* Check first if we already have the right
3432                 * host state (host features set)
3433                 */
3434                if (cp.le != lmp_host_le_capable(hdev) ||
3435                    cp.simul != lmp_host_le_br_capable(hdev))
3436                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3437                                    sizeof(cp), &cp);
3438        }
3439
3440        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3441                /* Make sure the controller has a good default for
3442                 * advertising data. This also applies to the case
3443                 * where BR/EDR was toggled during the AUTO_OFF phase.
3444                 */
3445                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3446                    list_empty(&hdev->adv_instances)) {
3447                        int err;
3448
3449                        if (ext_adv_capable(hdev)) {
3450                                err = __hci_req_setup_ext_adv_instance(req,
3451                                                                       0x00);
3452                                if (!err)
3453                                        __hci_req_update_scan_rsp_data(req,
3454                                                                       0x00);
3455                        } else {
3456                                err = 0;
3457                                __hci_req_update_adv_data(req, 0x00);
3458                                __hci_req_update_scan_rsp_data(req, 0x00);
3459                        }
3460
3461                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3462                                if (!ext_adv_capable(hdev))
3463                                        __hci_req_enable_advertising(req);
3464                                else if (!err)
3465                                        __hci_req_enable_ext_advertising(req,
3466                                                                         0x00);
3467                        }
3468                } else if (!list_empty(&hdev->adv_instances)) {
3469                        struct adv_info *adv_instance;
3470
3471                        adv_instance = list_first_entry(&hdev->adv_instances,
3472                                                        struct adv_info, list);
3473                        __hci_req_schedule_adv_instance(req,
3474                                                        adv_instance->instance,
3475                                                        true);
3476                }
3477        }
3478
3479        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3480        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3481                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3482                            sizeof(link_sec), &link_sec);
3483
3484        if (lmp_bredr_capable(hdev)) {
3485                __hci_req_write_fast_connectable(req,
3486                                hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE));
3489                __hci_req_update_scan(req);
3490                __hci_req_update_class(req);
3491                __hci_req_update_name(req);
3492                __hci_req_update_eir(req);
3493        }
3494
3495        hci_dev_unlock(hdev);
3496        return 0;
3497}
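
/* Added summary (annotation): at power-on the request above programs, in
 * order: SSP (and Secure Connections) host support, the LE host feature
 * bits, default advertising and scan response data (extended or legacy,
 * including enabling advertising or scheduling the first instance),
 * authentication enable, and for BR/EDR capable controllers the fast
 * connectable setting, scan mode, class of device, local name and EIR.
 */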
3498
3499int __hci_req_hci_power_on(struct hci_dev *hdev)
3500{
3501        /* Register the available SMP channels (BR/EDR and LE) only when
3502         * successfully powering on the controller. This late
3503         * registration is required so that LE SMP can clearly decide if
3504         * the public address or static address is used.
3505         */
3506        smp_register(hdev);
3507
3508        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3509                              NULL);
3510}
3511
3512void hci_request_setup(struct hci_dev *hdev)
3513{
3514        INIT_WORK(&hdev->discov_update, discov_update);
3515        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3516        INIT_WORK(&hdev->scan_update, scan_update_work);
3517        INIT_WORK(&hdev->connectable_update, connectable_update_work);
3518        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3519        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3520        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3521        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3522        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3523        INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3524}
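
/* Illustrative sketch (hypothetical, not from the original file): the work
 * items initialized above are later scheduled on hdev->req_workqueue,
 * e.g. from the mgmt layer; 'to' is an invented timeout variable.
 *
 *      queue_work(hdev->req_workqueue, &hdev->discov_update);
 *      queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
 */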
3525
3526void hci_request_cancel_all(struct hci_dev *hdev)
3527{
3528        hci_req_sync_cancel(hdev, ENODEV);
3529
3530        cancel_work_sync(&hdev->discov_update);
3531        cancel_work_sync(&hdev->bg_scan_update);
3532        cancel_work_sync(&hdev->scan_update);
3533        cancel_work_sync(&hdev->connectable_update);
3534        cancel_work_sync(&hdev->discoverable_update);
3535        cancel_delayed_work_sync(&hdev->discov_off);
3536        cancel_delayed_work_sync(&hdev->le_scan_disable);
3537        cancel_delayed_work_sync(&hdev->le_scan_restart);
3538
3539        if (hdev->adv_instance_timeout) {
3540                cancel_delayed_work_sync(&hdev->adv_instance_expire);
3541                hdev->adv_instance_timeout = 0;
3542        }
3543
3544        cancel_interleave_scan(hdev);
3545}
3546