linux/net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

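/* Initialize a request tracking structure: start with an empty command
 * queue, remember the controller it targets and clear any stale error.
 */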
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

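/* Attach the completion callbacks to the last queued command, splice the
 * request's commands onto the controller's command queue and schedule the
 * command work. A build error purges the queue and an empty request is
 * rejected with -ENODATA.
 */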
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        bt_cb(skb)->req.complete = complete;
        bt_cb(skb)->req.complete_skb = complete_skb;

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

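/* Allocate and fill an HCI command skb: the command header (opcode and
 * parameter length) followed by the optional parameter payload.
 */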
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        bt_cb(skb)->opcode = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->req.start = true;

        bt_cb(skb)->req.event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

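/* Queue an LE Set Scan Enable command that disables scanning */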
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

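/* Queue an LE Add Device To White List command for the given connection
 * parameters.
 */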
static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

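/* Synchronize the controller white list with the pending connection and
 * report lists and return the scan filter policy to use: 0x01 when the
 * white list can be used, 0x00 when scanning has to accept all advertising
 * instead.
 */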
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Now that the no longer valid white list entries have been
         * removed, walk through the list of pending connections and
         * ensure that any new device gets programmed into the
         * controller.
         *
         * If there are more devices than available white list entries
         * in the controller, then just abort and return a filter
         * policy value that does not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

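/* Queue the commands for a passive background scan: pick the own address
 * type, program the white list, then set the scan parameters and enable
 * scanning with duplicate filtering.
 */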
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses, i.e.
         * LE privacy is enabled, controllers with Extended Scanner Filter
         * Policies support can additionally handle directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled), use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

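/* Decide which own address type to use for the upcoming LE operation and,
 * if a random address is needed, queue the command to program it. Returns
 * 0 on success with *own_addr_type set, or a negative error if a new RPA
 * could not be generated.
 */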
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without a resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from six random bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

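/* Check whether any device on the BR/EDR whitelist currently lacks an
 * established (or establishing) ACL connection.
 */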
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

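/* Queue a Write Scan Enable command reflecting the current connectable and
 * discoverable settings, but only if the page scan state actually needs to
 * change.
 */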
void __hci_update_page_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

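/* Convenience wrapper that builds and runs a request containing only the
 * page scan update.
 */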
void hci_update_page_scan(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_update_page_scan(&req);
        hci_req_run(&req, NULL);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * take care of setting proper values for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If the controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If the controller is connecting, we should not start
                 * scanning since some controllers are not able to scan
                 * and connect at the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If the controller is currently scanning, we stop it to
                 * ensure we don't miss any advertising (due to the
                 * duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
                                            u16 opcode)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: "
                       "status 0x%2.2x", status);
}

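/* Build and run a request that brings the background scan state in line
 * with the pending connection and report lists.
 */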
void hci_update_background_scan(struct hci_dev *hdev)
{
        int err;
        struct hci_request req;

        hci_req_init(&req, hdev);

        __hci_update_background_scan(&req);

        err = hci_req_run(&req, update_background_scan_complete);
        if (err && err != -ENODATA)
                BT_ERR("Failed to run HCI request: err %d", err);
}