linux/drivers/bluetooth/hci_intel.c
   1/*
   2 *
   3 *  Bluetooth HCI UART driver for Intel devices
   4 *
   5 *  Copyright (C) 2015  Intel Corporation
   6 *
   7 *
   8 *  This program is free software; you can redistribute it and/or modify
   9 *  it under the terms of the GNU General Public License as published by
  10 *  the Free Software Foundation; either version 2 of the License, or
  11 *  (at your option) any later version.
  12 *
  13 *  This program is distributed in the hope that it will be useful,
  14 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 *  GNU General Public License for more details.
  17 *
  18 *  You should have received a copy of the GNU General Public License
  19 *  along with this program; if not, write to the Free Software
  20 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  21 *
  22 */
  23
  24#include <linux/kernel.h>
  25#include <linux/errno.h>
  26#include <linux/skbuff.h>
  27#include <linux/firmware.h>
  28#include <linux/module.h>
  29#include <linux/wait.h>
  30#include <linux/tty.h>
  31#include <linux/platform_device.h>
  32#include <linux/gpio/consumer.h>
  33#include <linux/acpi.h>
  34#include <linux/interrupt.h>
  35#include <linux/pm_runtime.h>
  36
  37#include <net/bluetooth/bluetooth.h>
  38#include <net/bluetooth/hci_core.h>
  39
  40#include "hci_uart.h"
  41#include "btintel.h"
  42
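     /* Driver state bits, set and tested atomically in intel_data->flags */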
  43#define STATE_BOOTLOADER        0
  44#define STATE_DOWNLOADING       1
  45#define STATE_FIRMWARE_LOADED   2
  46#define STATE_FIRMWARE_FAILED   3
  47#define STATE_BOOTING           4
  48#define STATE_LPM_ENABLED       5
  49#define STATE_TX_ACTIVE         6
  50#define STATE_SUSPENDED         7
  51#define STATE_LPM_TRANSACTION   8
  52
  53#define HCI_LPM_WAKE_PKT 0xf0
  54#define HCI_LPM_PKT 0xf1
  55#define HCI_LPM_MAX_SIZE 10
  56#define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE
  57
  58#define LPM_OP_TX_NOTIFY 0x00
  59#define LPM_OP_SUSPEND_ACK 0x02
  60#define LPM_OP_RESUME_ACK 0x03
  61
  62#define LPM_SUSPEND_DELAY_MS 1000
  63
  64struct hci_lpm_pkt {
  65        __u8 opcode;
  66        __u8 dlen;
   67        __u8 data[];
  68} __packed;
  69
  70struct intel_device {
  71        struct list_head list;
  72        struct platform_device *pdev;
  73        struct gpio_desc *reset;
  74        struct hci_uart *hu;
  75        struct mutex hu_lock;
  76        int irq;
  77};
  78
  79static LIST_HEAD(intel_device_list);
  80static DEFINE_MUTEX(intel_device_list_lock);
  81
  82struct intel_data {
  83        struct sk_buff *rx_skb;
  84        struct sk_buff_head txq;
  85        struct work_struct busy_work;
  86        struct hci_uart *hu;
  87        unsigned long flags;
  88};
  89
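     /* Map a UART baud rate to the vendor speed code used by the 0xfc06
      * speed change command; 0xff marks an unsupported rate.
      */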
  90static u8 intel_convert_speed(unsigned int speed)
  91{
  92        switch (speed) {
  93        case 9600:
  94                return 0x00;
  95        case 19200:
  96                return 0x01;
  97        case 38400:
  98                return 0x02;
  99        case 57600:
 100                return 0x03;
 101        case 115200:
 102                return 0x04;
 103        case 230400:
 104                return 0x05;
 105        case 460800:
 106                return 0x06;
 107        case 921600:
 108                return 0x07;
 109        case 1843200:
 110                return 0x08;
 111        case 3250000:
 112                return 0x09;
 113        case 2000000:
 114                return 0x0a;
 115        case 3000000:
 116                return 0x0b;
 117        default:
 118                return 0xff;
 119        }
 120}
 121
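     /* Wait up to one second for the controller to signal boot completion,
      * i.e. for intel_recv_event() to clear the STATE_BOOTING bit.
      */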
 122static int intel_wait_booting(struct hci_uart *hu)
 123{
 124        struct intel_data *intel = hu->priv;
 125        int err;
 126
 127        err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
 128                                  TASK_INTERRUPTIBLE,
 129                                  msecs_to_jiffies(1000));
 130
 131        if (err == -EINTR) {
 132                bt_dev_err(hu->hdev, "Device boot interrupted");
 133                return -EINTR;
 134        }
 135
 136        if (err) {
 137                bt_dev_err(hu->hdev, "Device boot timeout");
 138                return -ETIMEDOUT;
 139        }
 140
 141        return err;
 142}
 143
 144#ifdef CONFIG_PM
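     /* Wait up to one second for the pending LPM exchange to be acknowledged,
      * i.e. for intel_recv_lpm() to clear the STATE_LPM_TRANSACTION bit.
      */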
 145static int intel_wait_lpm_transaction(struct hci_uart *hu)
 146{
 147        struct intel_data *intel = hu->priv;
 148        int err;
 149
 150        err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION,
 151                                  TASK_INTERRUPTIBLE,
 152                                  msecs_to_jiffies(1000));
 153
 154        if (err == -EINTR) {
 155                bt_dev_err(hu->hdev, "LPM transaction interrupted");
 156                return -EINTR;
 157        }
 158
 159        if (err) {
 160                bt_dev_err(hu->hdev, "LPM transaction timeout");
 161                return -ETIMEDOUT;
 162        }
 163
 164        return err;
 165}
 166
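     /* Host-initiated LPM suspend: queue the vendor suspend packet ahead of
      * normal TX traffic, wait for the LPM_OP_SUSPEND_ACK to set
      * STATE_SUSPENDED and then stop UART traffic via flow control.
      */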
 167static int intel_lpm_suspend(struct hci_uart *hu)
 168{
 169        static const u8 suspend[] = { 0x01, 0x01, 0x01 };
 170        struct intel_data *intel = hu->priv;
 171        struct sk_buff *skb;
 172
 173        if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
 174            test_bit(STATE_SUSPENDED, &intel->flags))
 175                return 0;
 176
 177        if (test_bit(STATE_TX_ACTIVE, &intel->flags))
 178                return -EAGAIN;
 179
 180        bt_dev_dbg(hu->hdev, "Suspending");
 181
 182        skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL);
 183        if (!skb) {
 184                bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
 185                return -ENOMEM;
 186        }
 187
 188        skb_put_data(skb, suspend, sizeof(suspend));
 189        hci_skb_pkt_type(skb) = HCI_LPM_PKT;
 190
 191        set_bit(STATE_LPM_TRANSACTION, &intel->flags);
 192
 193        /* LPM flow is a priority, enqueue packet at list head */
 194        skb_queue_head(&intel->txq, skb);
 195        hci_uart_tx_wakeup(hu);
 196
 197        intel_wait_lpm_transaction(hu);
 198        /* Even in case of failure, continue and test the suspended flag */
 199
 200        clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
 201
 202        if (!test_bit(STATE_SUSPENDED, &intel->flags)) {
 203                bt_dev_err(hu->hdev, "Device suspend error");
 204                return -EINVAL;
 205        }
 206
 207        bt_dev_dbg(hu->hdev, "Suspended");
 208
 209        hci_uart_set_flow_control(hu, true);
 210
 211        return 0;
 212}
 213
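     /* Host-initiated LPM resume: re-enable UART traffic, queue the
      * HCI_LPM_WAKE_PKT and wait for the LPM_OP_RESUME_ACK to clear
      * STATE_SUSPENDED.
      */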
 214static int intel_lpm_resume(struct hci_uart *hu)
 215{
 216        struct intel_data *intel = hu->priv;
 217        struct sk_buff *skb;
 218
 219        if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
 220            !test_bit(STATE_SUSPENDED, &intel->flags))
 221                return 0;
 222
 223        bt_dev_dbg(hu->hdev, "Resuming");
 224
 225        hci_uart_set_flow_control(hu, false);
 226
 227        skb = bt_skb_alloc(0, GFP_KERNEL);
 228        if (!skb) {
 229                bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
 230                return -ENOMEM;
 231        }
 232
 233        hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT;
 234
 235        set_bit(STATE_LPM_TRANSACTION, &intel->flags);
 236
 237        /* LPM flow is a priority, enqueue packet at list head */
 238        skb_queue_head(&intel->txq, skb);
 239        hci_uart_tx_wakeup(hu);
 240
 241        intel_wait_lpm_transaction(hu);
 242        /* Even in case of failure, continue and test the suspended flag */
 243
 244        clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
 245
 246        if (test_bit(STATE_SUSPENDED, &intel->flags)) {
 247                bt_dev_err(hu->hdev, "Device resume error");
 248                return -EINVAL;
 249        }
 250
 251        bt_dev_dbg(hu->hdev, "Resumed");
 252
 253        return 0;
 254}
 255#endif /* CONFIG_PM */
 256
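     /* Controller-initiated resume (host-wake interrupt): re-enable UART
      * traffic, clear STATE_SUSPENDED and acknowledge with LPM_OP_RESUME_ACK.
      */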
 257static int intel_lpm_host_wake(struct hci_uart *hu)
 258{
 259        static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 };
 260        struct intel_data *intel = hu->priv;
 261        struct sk_buff *skb;
 262
 263        hci_uart_set_flow_control(hu, false);
 264
 265        clear_bit(STATE_SUSPENDED, &intel->flags);
 266
 267        skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL);
 268        if (!skb) {
 269                bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
 270                return -ENOMEM;
 271        }
 272
 273        skb_put_data(skb, lpm_resume_ack, sizeof(lpm_resume_ack));
 274        hci_skb_pkt_type(skb) = HCI_LPM_PKT;
 275
 276        /* LPM flow is a priority, enqueue packet at list head */
 277        skb_queue_head(&intel->txq, skb);
 278        hci_uart_tx_wakeup(hu);
 279
 280        bt_dev_dbg(hu->hdev, "Resumed by controller");
 281
 282        return 0;
 283}
 284
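     /* Threaded handler for the host-wake interrupt: resume the LPM link and
      * re-arm the runtime PM autosuspend timer.
      */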
 285static irqreturn_t intel_irq(int irq, void *dev_id)
 286{
 287        struct intel_device *idev = dev_id;
 288
 289        dev_info(&idev->pdev->dev, "hci_intel irq\n");
 290
 291        mutex_lock(&idev->hu_lock);
 292        if (idev->hu)
 293                intel_lpm_host_wake(idev->hu);
 294        mutex_unlock(&idev->hu_lock);
 295
 296        /* Host/Controller are now LPM resumed, trigger a new delayed suspend */
 297        pm_runtime_get(&idev->pdev->dev);
 298        pm_runtime_mark_last_busy(&idev->pdev->dev);
 299        pm_runtime_put_autosuspend(&idev->pdev->dev);
 300
 301        return IRQ_HANDLED;
 302}
 303
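     /* Power the controller up or down through the matching platform device,
      * identified by the shared UART parent: toggle the reset GPIO, publish
      * or clear the hci_uart reference used by the PM callbacks, and set up
      * or tear down the host-wake IRQ and runtime PM.
      */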
 304static int intel_set_power(struct hci_uart *hu, bool powered)
 305{
 306        struct list_head *p;
 307        int err = -ENODEV;
 308
 309        if (!hu->tty->dev)
 310                return err;
 311
 312        mutex_lock(&intel_device_list_lock);
 313
 314        list_for_each(p, &intel_device_list) {
 315                struct intel_device *idev = list_entry(p, struct intel_device,
 316                                                       list);
 317
 318                /* tty device and pdev device should share the same parent
 319                 * which is the UART port.
 320                 */
 321                if (hu->tty->dev->parent != idev->pdev->dev.parent)
 322                        continue;
 323
 324                if (!idev->reset) {
 325                        err = -ENOTSUPP;
 326                        break;
 327                }
 328
 329                BT_INFO("hu %p, Switching compatible pm device (%s) to %u",
 330                        hu, dev_name(&idev->pdev->dev), powered);
 331
 332                gpiod_set_value(idev->reset, powered);
 333
 334                /* Provide to idev a hu reference which is used to run LPM
 335                 * transactions (lpm suspend/resume) from PM callbacks.
  336         * hu needs to be protected against concurrent removal during
 337                 * these PM ops.
 338                 */
 339                mutex_lock(&idev->hu_lock);
 340                idev->hu = powered ? hu : NULL;
 341                mutex_unlock(&idev->hu_lock);
 342
 343                if (idev->irq < 0)
 344                        break;
 345
 346                if (powered && device_can_wakeup(&idev->pdev->dev)) {
 347                        err = devm_request_threaded_irq(&idev->pdev->dev,
 348                                                        idev->irq, NULL,
 349                                                        intel_irq,
 350                                                        IRQF_ONESHOT,
 351                                                        "bt-host-wake", idev);
 352                        if (err) {
 353                                BT_ERR("hu %p, unable to allocate irq-%d",
 354                                       hu, idev->irq);
 355                                break;
 356                        }
 357
 358                        device_wakeup_enable(&idev->pdev->dev);
 359
 360                        pm_runtime_set_active(&idev->pdev->dev);
 361                        pm_runtime_use_autosuspend(&idev->pdev->dev);
 362                        pm_runtime_set_autosuspend_delay(&idev->pdev->dev,
 363                                                         LPM_SUSPEND_DELAY_MS);
 364                        pm_runtime_enable(&idev->pdev->dev);
 365                } else if (!powered && device_may_wakeup(&idev->pdev->dev)) {
 366                        devm_free_irq(&idev->pdev->dev, idev->irq, idev);
 367                        device_wakeup_disable(&idev->pdev->dev);
 368
 369                        pm_runtime_disable(&idev->pdev->dev);
 370                }
 371        }
 372
 373        mutex_unlock(&intel_device_list_lock);
 374
 375        return err;
 376}
 377
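     /* The link reported TX activity; touch the runtime PM timestamp of the
      * matching platform device so that autosuspend is postponed.
      */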
 378static void intel_busy_work(struct work_struct *work)
 379{
 380        struct list_head *p;
 381        struct intel_data *intel = container_of(work, struct intel_data,
 382                                                busy_work);
 383
 384        if (!intel->hu->tty->dev)
 385                return;
 386
 387        /* Link is busy, delay the suspend */
 388        mutex_lock(&intel_device_list_lock);
 389        list_for_each(p, &intel_device_list) {
 390                struct intel_device *idev = list_entry(p, struct intel_device,
 391                                                       list);
 392
 393                if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
 394                        pm_runtime_get(&idev->pdev->dev);
 395                        pm_runtime_mark_last_busy(&idev->pdev->dev);
 396                        pm_runtime_put_autosuspend(&idev->pdev->dev);
 397                        break;
 398                }
 399        }
 400        mutex_unlock(&intel_device_list_lock);
 401}
 402
 403static int intel_open(struct hci_uart *hu)
 404{
 405        struct intel_data *intel;
 406
 407        BT_DBG("hu %p", hu);
 408
 409        intel = kzalloc(sizeof(*intel), GFP_KERNEL);
 410        if (!intel)
 411                return -ENOMEM;
 412
 413        skb_queue_head_init(&intel->txq);
 414        INIT_WORK(&intel->busy_work, intel_busy_work);
 415
 416        intel->hu = hu;
 417
 418        hu->priv = intel;
 419
 420        if (!intel_set_power(hu, true))
 421                set_bit(STATE_BOOTING, &intel->flags);
 422
 423        return 0;
 424}
 425
 426static int intel_close(struct hci_uart *hu)
 427{
 428        struct intel_data *intel = hu->priv;
 429
 430        BT_DBG("hu %p", hu);
 431
 432        cancel_work_sync(&intel->busy_work);
 433
 434        intel_set_power(hu, false);
 435
 436        skb_queue_purge(&intel->txq);
 437        kfree_skb(intel->rx_skb);
 438        kfree(intel);
 439
 440        hu->priv = NULL;
 441        return 0;
 442}
 443
 444static int intel_flush(struct hci_uart *hu)
 445{
 446        struct intel_data *intel = hu->priv;
 447
 448        BT_DBG("hu %p", hu);
 449
 450        skb_queue_purge(&intel->txq);
 451
 452        return 0;
 453}
 454
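     /* Synthesize an HCI Command Complete event for @opcode and feed it to
      * the core, for vendor commands that never generate one themselves.
      */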
 455static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
 456{
 457        struct sk_buff *skb;
 458        struct hci_event_hdr *hdr;
 459        struct hci_ev_cmd_complete *evt;
 460
 461        skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
 462        if (!skb)
 463                return -ENOMEM;
 464
 465        hdr = skb_put(skb, sizeof(*hdr));
 466        hdr->evt = HCI_EV_CMD_COMPLETE;
 467        hdr->plen = sizeof(*evt) + 1;
 468
 469        evt = skb_put(skb, sizeof(*evt));
 470        evt->ncmd = 0x01;
 471        evt->opcode = cpu_to_le16(opcode);
 472
 473        skb_put_u8(skb, 0x00);
 474
 475        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 476
 477        return hci_recv_frame(hdev, skb);
 478}
 479
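     /* Change the operating baud rate: read the Intel version first (the
      * controller rejects the change otherwise), send the vendor 0xfc06
      * speed command, give the controller 100ms to re-clock and then switch
      * the host UART to the new speed.
      */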
 480static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
 481{
 482        struct intel_data *intel = hu->priv;
 483        struct hci_dev *hdev = hu->hdev;
 484        u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 };
 485        struct sk_buff *skb;
 486        int err;
 487
 488        /* This can be the first command sent to the chip, check
 489         * that the controller is ready.
 490         */
 491        err = intel_wait_booting(hu);
 492
 493        clear_bit(STATE_BOOTING, &intel->flags);
 494
 495        /* In case of timeout, try to continue anyway */
 496        if (err && err != -ETIMEDOUT)
 497                return err;
 498
 499        bt_dev_info(hdev, "Change controller speed to %d", speed);
 500
 501        speed_cmd[3] = intel_convert_speed(speed);
 502        if (speed_cmd[3] == 0xff) {
 503                bt_dev_err(hdev, "Unsupported speed");
 504                return -EINVAL;
 505        }
 506
 507        /* Device will not accept speed change if Intel version has not been
 508         * previously requested.
 509         */
 510        skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
 511        if (IS_ERR(skb)) {
 512                bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
 513                           PTR_ERR(skb));
 514                return PTR_ERR(skb);
 515        }
 516        kfree_skb(skb);
 517
 518        skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
 519        if (!skb) {
 520                bt_dev_err(hdev, "Failed to alloc memory for baudrate packet");
 521                return -ENOMEM;
 522        }
 523
 524        skb_put_data(skb, speed_cmd, sizeof(speed_cmd));
 525        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
 526
 527        hci_uart_set_flow_control(hu, true);
 528
 529        skb_queue_tail(&intel->txq, skb);
 530        hci_uart_tx_wakeup(hu);
 531
 532        /* wait 100ms to change baudrate on controller side */
 533        msleep(100);
 534
 535        hci_uart_set_baudrate(hu, speed);
 536        hci_uart_set_flow_control(hu, false);
 537
 538        return 0;
 539}
 540
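     /* Full device bring-up: read the Intel version, download the matching
      * firmware while in bootloader mode, boot the operational firmware,
      * load the DDC parameters and configure LPM and the operating speed.
      */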
 541static int intel_setup(struct hci_uart *hu)
 542{
 543        struct intel_data *intel = hu->priv;
 544        struct hci_dev *hdev = hu->hdev;
 545        struct sk_buff *skb;
 546        struct intel_version ver;
 547        struct intel_boot_params params;
 548        struct list_head *p;
 549        const struct firmware *fw;
 550        char fwname[64];
 551        u32 boot_param;
 552        ktime_t calltime, delta, rettime;
 553        unsigned long long duration;
 554        unsigned int init_speed, oper_speed;
 555        int speed_change = 0;
 556        int err;
 557
 558        bt_dev_dbg(hdev, "start intel_setup");
 559
 560        hu->hdev->set_diag = btintel_set_diag;
 561        hu->hdev->set_bdaddr = btintel_set_bdaddr;
 562
  563        /* Set the default boot parameter to 0x0. It is updated to the
  564         * SKU-specific boot parameter read from the Intel_Write_Boot_Params
  565         * command while downloading the firmware.
  566         */
 567        boot_param = 0x00000000;
 568
 569        calltime = ktime_get();
 570
 571        if (hu->init_speed)
 572                init_speed = hu->init_speed;
 573        else
 574                init_speed = hu->proto->init_speed;
 575
 576        if (hu->oper_speed)
 577                oper_speed = hu->oper_speed;
 578        else
 579                oper_speed = hu->proto->oper_speed;
 580
 581        if (oper_speed && init_speed && oper_speed != init_speed)
 582                speed_change = 1;
 583
 584        /* Check that the controller is ready */
 585        err = intel_wait_booting(hu);
 586
 587        clear_bit(STATE_BOOTING, &intel->flags);
 588
 589        /* In case of timeout, try to continue anyway */
 590        if (err && err != -ETIMEDOUT)
 591                return err;
 592
 593        set_bit(STATE_BOOTLOADER, &intel->flags);
 594
 595        /* Read the Intel version information to determine if the device
 596         * is in bootloader mode or if it already has operational firmware
 597         * loaded.
 598         */
  599        err = btintel_read_version(hdev, &ver);
  600        if (err)
 601                return err;
 602
 603        /* The hardware platform number has a fixed value of 0x37 and
 604         * for now only accept this single value.
 605         */
 606        if (ver.hw_platform != 0x37) {
 607                bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
 608                           ver.hw_platform);
 609                return -EINVAL;
 610        }
 611
 612        /* Check for supported iBT hardware variants of this firmware
 613         * loading method.
 614         *
 615         * This check has been put in place to ensure correct forward
 616         * compatibility options when newer hardware variants come along.
 617         */
 618        switch (ver.hw_variant) {
 619        case 0x0b:      /* LnP */
 620        case 0x0c:      /* WsP */
 621        case 0x12:      /* ThP */
 622                break;
 623        default:
 624                bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
 625                           ver.hw_variant);
 626                return -EINVAL;
 627        }
 628
 629        btintel_version_info(hdev, &ver);
 630
 631        /* The firmware variant determines if the device is in bootloader
 632         * mode or is running operational firmware. The value 0x06 identifies
 633         * the bootloader and the value 0x23 identifies the operational
 634         * firmware.
 635         *
 636         * When the operational firmware is already present, then only
 637         * the check for valid Bluetooth device address is needed. This
 638         * determines if the device will be added as configured or
 639         * unconfigured controller.
 640         *
 641         * It is not possible to use the Secure Boot Parameters in this
 642         * case since that command is only available in bootloader mode.
 643         */
 644        if (ver.fw_variant == 0x23) {
 645                clear_bit(STATE_BOOTLOADER, &intel->flags);
 646                btintel_check_bdaddr(hdev);
 647                return 0;
 648        }
 649
 650        /* If the device is not in bootloader mode, then the only possible
 651         * choice is to return an error and abort the device initialization.
 652         */
 653        if (ver.fw_variant != 0x06) {
 654                bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
 655                           ver.fw_variant);
 656                return -ENODEV;
 657        }
 658
 659        /* Read the secure boot parameters to identify the operating
 660         * details of the bootloader.
 661         */
 662        err = btintel_read_boot_params(hdev, &params);
 663        if (err)
 664                return err;
 665
 666        /* It is required that every single firmware fragment is acknowledged
 667         * with a command complete event. If the boot parameters indicate
 668         * that this bootloader does not send them, then abort the setup.
 669         */
 670        if (params.limited_cce != 0x00) {
 671                bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
 672                           params.limited_cce);
 673                return -EINVAL;
 674        }
 675
 676        /* If the OTP has no valid Bluetooth device address, then there will
 677         * also be no valid address for the operational firmware.
 678         */
 679        if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
 680                bt_dev_info(hdev, "No device address configured");
 681                set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 682        }
 683
 684        /* With this Intel bootloader only the hardware variant and device
 685         * revision information are used to select the right firmware for SfP
 686         * and WsP.
 687         *
 688         * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
 689         *
 690         * Currently the supported hardware variants are:
 691         *   11 (0x0b) for iBT 3.0 (LnP/SfP)
 692         *   12 (0x0c) for iBT 3.5 (WsP)
 693         *
  694         * For ThP/JfP and for future SKUs, the FW name varies based on HW
 695         * variant, HW revision and FW revision, as these are dependent on CNVi
 696         * and RF Combination.
 697         *
 698         *   18 (0x12) for iBT3.5 (ThP/JfP)
 699         *
 700         * The firmware file name for these will be
 701         * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
 702         *
 703         */
 704        switch (ver.hw_variant) {
 705        case 0x0b:      /* SfP */
 706        case 0x0c:      /* WsP */
 707                snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
 708                         le16_to_cpu(ver.hw_variant),
 709                         le16_to_cpu(params.dev_revid));
 710                break;
 711        case 0x12:      /* ThP */
 712                snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
 713                         le16_to_cpu(ver.hw_variant),
 714                         le16_to_cpu(ver.hw_revision),
 715                         le16_to_cpu(ver.fw_revision));
 716                break;
 717        default:
 718                bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
 719                           ver.hw_variant);
 720                return -EINVAL;
 721        }
 722
 723        err = request_firmware(&fw, fwname, &hdev->dev);
 724        if (err < 0) {
 725                bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
 726                           err);
 727                return err;
 728        }
 729
 730        bt_dev_info(hdev, "Found device firmware: %s", fwname);
 731
 732        /* Save the DDC file name for later */
 733        switch (ver.hw_variant) {
 734        case 0x0b:      /* SfP */
 735        case 0x0c:      /* WsP */
 736                snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
 737                         le16_to_cpu(ver.hw_variant),
 738                         le16_to_cpu(params.dev_revid));
 739                break;
 740        case 0x12:      /* ThP */
 741                snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
 742                         le16_to_cpu(ver.hw_variant),
 743                         le16_to_cpu(ver.hw_revision),
 744                         le16_to_cpu(ver.fw_revision));
 745                break;
 746        default:
 747                bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
 748                           ver.hw_variant);
 749                return -EINVAL;
 750        }
 751
 752        if (fw->size < 644) {
 753                bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
 754                           fw->size);
 755                err = -EBADF;
 756                goto done;
 757        }
 758
 759        set_bit(STATE_DOWNLOADING, &intel->flags);
 760
 761        /* Start firmware downloading and get boot parameter */
 762        err = btintel_download_firmware(hdev, fw, &boot_param);
 763        if (err < 0)
 764                goto done;
 765
 766        set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
 767
 768        bt_dev_info(hdev, "Waiting for firmware download to complete");
 769
 770        /* Before switching the device into operational mode and with that
 771         * booting the loaded firmware, wait for the bootloader notification
 772         * that all fragments have been successfully received.
 773         *
 774         * When the event processing receives the notification, then the
 775         * STATE_DOWNLOADING flag will be cleared.
 776         *
 777         * The firmware loading should not take longer than 5 seconds
 778         * and thus just timeout if that happens and fail the setup
 779         * of this device.
 780         */
 781        err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
 782                                  TASK_INTERRUPTIBLE,
 783                                  msecs_to_jiffies(5000));
 784        if (err == -EINTR) {
 785                bt_dev_err(hdev, "Firmware loading interrupted");
 786                err = -EINTR;
 787                goto done;
 788        }
 789
 790        if (err) {
 791                bt_dev_err(hdev, "Firmware loading timeout");
 792                err = -ETIMEDOUT;
 793                goto done;
 794        }
 795
 796        if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
 797                bt_dev_err(hdev, "Firmware loading failed");
 798                err = -ENOEXEC;
 799                goto done;
 800        }
 801
 802        rettime = ktime_get();
 803        delta = ktime_sub(rettime, calltime);
 804        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 805
 806        bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
 807
 808done:
 809        release_firmware(fw);
 810
 811        if (err < 0)
 812                return err;
 813
 814        /* We need to restore the default speed before Intel reset */
 815        if (speed_change) {
 816                err = intel_set_baudrate(hu, init_speed);
 817                if (err)
 818                        return err;
 819        }
 820
 821        calltime = ktime_get();
 822
 823        set_bit(STATE_BOOTING, &intel->flags);
 824
 825        err = btintel_send_intel_reset(hdev, boot_param);
 826        if (err)
 827                return err;
 828
 829        /* The bootloader will not indicate when the device is ready. This
  830         * is done by the operational firmware sending a bootup notification.
 831         *
 832         * Booting into operational firmware should not take longer than
 833         * 1 second. However if that happens, then just fail the setup
 834         * since something went wrong.
 835         */
 836        bt_dev_info(hdev, "Waiting for device to boot");
 837
 838        err = intel_wait_booting(hu);
 839        if (err)
 840                return err;
 841
 842        clear_bit(STATE_BOOTING, &intel->flags);
 843
 844        rettime = ktime_get();
 845        delta = ktime_sub(rettime, calltime);
 846        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 847
 848        bt_dev_info(hdev, "Device booted in %llu usecs", duration);
 849
  850        /* Enable LPM if the matching pdev has wakeup enabled; keep TX marked
  851         * active until the controller sends an LPM TX notification.
  852         */
 853        mutex_lock(&intel_device_list_lock);
 854        list_for_each(p, &intel_device_list) {
 855                struct intel_device *dev = list_entry(p, struct intel_device,
 856                                                      list);
 857                if (!hu->tty->dev)
 858                        break;
 859                if (hu->tty->dev->parent == dev->pdev->dev.parent) {
 860                        if (device_may_wakeup(&dev->pdev->dev)) {
 861                                set_bit(STATE_LPM_ENABLED, &intel->flags);
 862                                set_bit(STATE_TX_ACTIVE, &intel->flags);
 863                        }
 864                        break;
 865                }
 866        }
 867        mutex_unlock(&intel_device_list_lock);
 868
 869        /* Ignore errors, device can work without DDC parameters */
 870        btintel_load_ddc_config(hdev, fwname);
 871
 872        skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 873        if (IS_ERR(skb))
 874                return PTR_ERR(skb);
 875        kfree_skb(skb);
 876
 877        if (speed_change) {
 878                err = intel_set_baudrate(hu, oper_speed);
 879                if (err)
 880                        return err;
 881        }
 882
 883        bt_dev_info(hdev, "Setup complete");
 884
 885        clear_bit(STATE_BOOTLOADER, &intel->flags);
 886
 887        return 0;
 888}
 889
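     /* While in bootloader or booting state, intercept the vendor events
      * that report firmware download completion and boot-up and wake up the
      * corresponding waiters, then pass every event on to the core.
      */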
 890static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
 891{
 892        struct hci_uart *hu = hci_get_drvdata(hdev);
 893        struct intel_data *intel = hu->priv;
 894        struct hci_event_hdr *hdr;
 895
 896        if (!test_bit(STATE_BOOTLOADER, &intel->flags) &&
 897            !test_bit(STATE_BOOTING, &intel->flags))
 898                goto recv;
 899
 900        hdr = (void *)skb->data;
 901
 902        /* When the firmware loading completes the device sends
 903         * out a vendor specific event indicating the result of
 904         * the firmware loading.
 905         */
 906        if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
 907            skb->data[2] == 0x06) {
 908                if (skb->data[3] != 0x00)
 909                        set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
 910
 911                if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
 912                    test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
 913                        smp_mb__after_atomic();
 914                        wake_up_bit(&intel->flags, STATE_DOWNLOADING);
 915                }
 916
 917        /* When switching to the operational firmware the device
 918         * sends a vendor specific event indicating that the bootup
 919         * completed.
 920         */
 921        } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
 922                   skb->data[2] == 0x02) {
 923                if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
 924                        smp_mb__after_atomic();
 925                        wake_up_bit(&intel->flags, STATE_BOOTING);
 926                }
 927        }
 928recv:
 929        return hci_recv_frame(hdev, skb);
 930}
 931
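     /* LPM TX notification from the controller: track whether its
      * transmitter is active and postpone the runtime suspend when it
      * becomes active.
      */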
 932static void intel_recv_lpm_notify(struct hci_dev *hdev, int value)
 933{
 934        struct hci_uart *hu = hci_get_drvdata(hdev);
 935        struct intel_data *intel = hu->priv;
 936
 937        bt_dev_dbg(hdev, "TX idle notification (%d)", value);
 938
 939        if (value) {
 940                set_bit(STATE_TX_ACTIVE, &intel->flags);
 941                schedule_work(&intel->busy_work);
 942        } else {
 943                clear_bit(STATE_TX_ACTIVE, &intel->flags);
 944        }
 945}
 946
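     /* Handle a received vendor LPM packet: TX notifications plus the
      * suspend/resume acknowledgements that complete an LPM transaction.
      */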
 947static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
 948{
 949        struct hci_lpm_pkt *lpm = (void *)skb->data;
 950        struct hci_uart *hu = hci_get_drvdata(hdev);
 951        struct intel_data *intel = hu->priv;
 952
 953        switch (lpm->opcode) {
 954        case LPM_OP_TX_NOTIFY:
 955                if (lpm->dlen < 1) {
 956                        bt_dev_err(hu->hdev, "Invalid LPM notification packet");
 957                        break;
 958                }
 959                intel_recv_lpm_notify(hdev, lpm->data[0]);
 960                break;
 961        case LPM_OP_SUSPEND_ACK:
 962                set_bit(STATE_SUSPENDED, &intel->flags);
 963                if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
 964                        smp_mb__after_atomic();
 965                        wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
 966                }
 967                break;
 968        case LPM_OP_RESUME_ACK:
 969                clear_bit(STATE_SUSPENDED, &intel->flags);
 970                if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
 971                        smp_mb__after_atomic();
 972                        wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
 973                }
 974                break;
 975        default:
 976                bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode);
 977                break;
 978        }
 979
 980        kfree_skb(skb);
 981
 982        return 0;
 983}
 984
 985#define INTEL_RECV_LPM \
 986        .type = HCI_LPM_PKT, \
 987        .hlen = HCI_LPM_HDR_SIZE, \
 988        .loff = 1, \
 989        .lsize = 1, \
 990        .maxlen = HCI_LPM_MAX_SIZE
 991
 992static const struct h4_recv_pkt intel_recv_pkts[] = {
 993        { H4_RECV_ACL,    .recv = hci_recv_frame   },
 994        { H4_RECV_SCO,    .recv = hci_recv_frame   },
 995        { H4_RECV_EVENT,  .recv = intel_recv_event },
 996        { INTEL_RECV_LPM, .recv = intel_recv_lpm   },
 997};
 998
 999static int intel_recv(struct hci_uart *hu, const void *data, int count)
1000{
1001        struct intel_data *intel = hu->priv;
1002
1003        if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
1004                return -EUNATCH;
1005
1006        intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
1007                                    intel_recv_pkts,
1008                                    ARRAY_SIZE(intel_recv_pkts));
1009        if (IS_ERR(intel->rx_skb)) {
1010                int err = PTR_ERR(intel->rx_skb);
1011                bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
1012                intel->rx_skb = NULL;
1013                return err;
1014        }
1015
1016        return count;
1017}
1018
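     /* Runtime-resume the matching platform device before queuing a packet
      * so that nothing is sent while the LPM link is suspended.
      */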
1019static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
1020{
1021        struct intel_data *intel = hu->priv;
1022        struct list_head *p;
1023
1024        BT_DBG("hu %p skb %p", hu, skb);
1025
1026        if (!hu->tty->dev)
1027                goto out_enqueue;
1028
 1029        /* Make sure the controller is resumed and any pending LPM transaction
 1030         * has completed before enqueuing any packet.
 1031         */
1032        mutex_lock(&intel_device_list_lock);
1033        list_for_each(p, &intel_device_list) {
1034                struct intel_device *idev = list_entry(p, struct intel_device,
1035                                                       list);
1036
1037                if (hu->tty->dev->parent == idev->pdev->dev.parent) {
1038                        pm_runtime_get_sync(&idev->pdev->dev);
1039                        pm_runtime_mark_last_busy(&idev->pdev->dev);
1040                        pm_runtime_put_autosuspend(&idev->pdev->dev);
1041                        break;
1042                }
1043        }
1044        mutex_unlock(&intel_device_list_lock);
1045out_enqueue:
1046        skb_queue_tail(&intel->txq, skb);
1047
1048        return 0;
1049}
1050
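     /* Pop the next TX packet, inject the missing command complete event for
      * the bootloader 0xfc01 reset and prepend the H:4 packet type byte.
      */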
1051static struct sk_buff *intel_dequeue(struct hci_uart *hu)
1052{
1053        struct intel_data *intel = hu->priv;
1054        struct sk_buff *skb;
1055
1056        skb = skb_dequeue(&intel->txq);
1057        if (!skb)
1058                return skb;
1059
1060        if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
1061            (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) {
1062                struct hci_command_hdr *cmd = (void *)skb->data;
1063                __u16 opcode = le16_to_cpu(cmd->opcode);
1064
 1065                /* When the 0xfc01 command is issued to boot into
 1066                 * the operational firmware, the controller does not
 1067                 * send a command complete event. To keep the flow
 1068                 * control working, inject that event here.
 1069                 */
1070                if (opcode == 0xfc01)
1071                        inject_cmd_complete(hu->hdev, opcode);
1072        }
1073
1074        /* Prepend skb with frame type */
1075        memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
1076
1077        return skb;
1078}
1079
1080static const struct hci_uart_proto intel_proto = {
1081        .id             = HCI_UART_INTEL,
1082        .name           = "Intel",
1083        .manufacturer   = 2,
1084        .init_speed     = 115200,
1085        .oper_speed     = 3000000,
1086        .open           = intel_open,
1087        .close          = intel_close,
1088        .flush          = intel_flush,
1089        .setup          = intel_setup,
1090        .set_baudrate   = intel_set_baudrate,
1091        .recv           = intel_recv,
1092        .enqueue        = intel_enqueue,
1093        .dequeue        = intel_dequeue,
1094};
1095
1096#ifdef CONFIG_ACPI
1097static const struct acpi_device_id intel_acpi_match[] = {
1098        { "INT33E1", 0 },
1099        { },
1100};
1101MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
1102#endif
1103
1104#ifdef CONFIG_PM
1105static int intel_suspend_device(struct device *dev)
1106{
1107        struct intel_device *idev = dev_get_drvdata(dev);
1108
1109        mutex_lock(&idev->hu_lock);
1110        if (idev->hu)
1111                intel_lpm_suspend(idev->hu);
1112        mutex_unlock(&idev->hu_lock);
1113
1114        return 0;
1115}
1116
1117static int intel_resume_device(struct device *dev)
1118{
1119        struct intel_device *idev = dev_get_drvdata(dev);
1120
1121        mutex_lock(&idev->hu_lock);
1122        if (idev->hu)
1123                intel_lpm_resume(idev->hu);
1124        mutex_unlock(&idev->hu_lock);
1125
1126        return 0;
1127}
1128#endif
1129
1130#ifdef CONFIG_PM_SLEEP
1131static int intel_suspend(struct device *dev)
1132{
1133        struct intel_device *idev = dev_get_drvdata(dev);
1134
1135        if (device_may_wakeup(dev))
1136                enable_irq_wake(idev->irq);
1137
1138        return intel_suspend_device(dev);
1139}
1140
1141static int intel_resume(struct device *dev)
1142{
1143        struct intel_device *idev = dev_get_drvdata(dev);
1144
1145        if (device_may_wakeup(dev))
1146                disable_irq_wake(idev->irq);
1147
1148        return intel_resume_device(dev);
1149}
1150#endif
1151
1152static const struct dev_pm_ops intel_pm_ops = {
1153        SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
1154        SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL)
1155};
1156
1157static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
1158static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false };
1159
1160static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = {
1161        { "reset-gpios", &reset_gpios, 1 },
1162        { "host-wake-gpios", &host_wake_gpios, 1 },
1163        { },
1164};
1165
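     /* ACPI platform device probe: claim the reset GPIO, resolve the
      * host-wake IRQ (directly or via the host-wake GPIO) and add the device
      * to intel_device_list so intel_set_power() can match it by UART parent.
      */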
1166static int intel_probe(struct platform_device *pdev)
1167{
1168        struct intel_device *idev;
1169        int ret;
1170
1171        idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
1172        if (!idev)
1173                return -ENOMEM;
1174
1175        mutex_init(&idev->hu_lock);
1176
1177        idev->pdev = pdev;
1178
1179        ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, acpi_hci_intel_gpios);
1180        if (ret)
1181                dev_dbg(&pdev->dev, "Unable to add GPIO mapping table\n");
1182
1183        idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
1184        if (IS_ERR(idev->reset)) {
1185                dev_err(&pdev->dev, "Unable to retrieve gpio\n");
1186                return PTR_ERR(idev->reset);
1187        }
1188
1189        idev->irq = platform_get_irq(pdev, 0);
1190        if (idev->irq < 0) {
1191                struct gpio_desc *host_wake;
1192
1193                dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
1194
1195                host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
1196                if (IS_ERR(host_wake)) {
1197                        dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
1198                        goto no_irq;
1199                }
1200
1201                idev->irq = gpiod_to_irq(host_wake);
1202                if (idev->irq < 0) {
1203                        dev_err(&pdev->dev, "No corresponding irq for gpio\n");
1204                        goto no_irq;
1205                }
1206        }
1207
1208        /* Only enable wake-up/irq when controller is powered */
1209        device_set_wakeup_capable(&pdev->dev, true);
1210        device_wakeup_disable(&pdev->dev);
1211
1212no_irq:
1213        platform_set_drvdata(pdev, idev);
1214
1215        /* Place this instance on the device list */
1216        mutex_lock(&intel_device_list_lock);
1217        list_add_tail(&idev->list, &intel_device_list);
1218        mutex_unlock(&intel_device_list_lock);
1219
1220        dev_info(&pdev->dev, "registered, gpio(%d)/irq(%d).\n",
1221                 desc_to_gpio(idev->reset), idev->irq);
1222
1223        return 0;
1224}
1225
1226static int intel_remove(struct platform_device *pdev)
1227{
1228        struct intel_device *idev = platform_get_drvdata(pdev);
1229
1230        device_wakeup_disable(&pdev->dev);
1231
1232        mutex_lock(&intel_device_list_lock);
1233        list_del(&idev->list);
1234        mutex_unlock(&intel_device_list_lock);
1235
1236        dev_info(&pdev->dev, "unregistered.\n");
1237
1238        return 0;
1239}
1240
1241static struct platform_driver intel_driver = {
1242        .probe = intel_probe,
1243        .remove = intel_remove,
1244        .driver = {
1245                .name = "hci_intel",
1246                .acpi_match_table = ACPI_PTR(intel_acpi_match),
1247                .pm = &intel_pm_ops,
1248        },
1249};
1250
1251int __init intel_init(void)
1252{
1253        platform_driver_register(&intel_driver);
1254
1255        return hci_uart_register_proto(&intel_proto);
1256}
1257
1258int __exit intel_deinit(void)
1259{
1260        platform_driver_unregister(&intel_driver);
1261
1262        return hci_uart_unregister_proto(&intel_proto);
1263}
1264