linux/drivers/scsi/ufs/ufshcd-pci.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * This code is based on drivers/scsi/ufs/ufshcd-pci.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

struct ufs_host {
        void (*late_init)(struct ufs_hba *hba);
};

enum {
        INTEL_DSM_FNS           =  0,
        INTEL_DSM_RESET         =  1,
};

struct intel_host {
        struct ufs_host ufs_host;
        u32             dsm_fns;
        u32             active_ltr;
        u32             idle_ltr;
        struct dentry   *debugfs_root;
        struct gpio_desc *reset_gpio;
};

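/*
 * Device-specific method (_DSM) UUID for Intel UFS host controllers.
 * Function 0 (INTEL_DSM_FNS) returns a bitmap of the supported functions,
 * which intel_dsm() consults before evaluating any other function.
 */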
static const guid_t intel_dsm_guid =
        GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
                  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);

static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
                       unsigned int fn, u32 *result)
{
        union acpi_object *obj;
        int err = 0;
        size_t len;

        obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
        if (!obj)
                return -EOPNOTSUPP;

        if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
                err = -EINVAL;
                goto out;
        }

        len = min_t(size_t, obj->buffer.length, 4);

        *result = 0;
        memcpy(result, obj->buffer.pointer, len);
out:
        ACPI_FREE(obj);

        return err;
}

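/*
 * Evaluate a _DSM function only if INTEL_DSM_FNS reported it as supported;
 * the dsm_fns bitmap covers functions 0-31.
 */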
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
                     unsigned int fn, u32 *result)
{
        if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
                return -EOPNOTSUPP;

        return __intel_dsm(intel_host, dev, fn, result);
}

static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
        int err;

        err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
        dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}

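/*
 * The crypto enable bit in REG_CONTROLLER_ENABLE can only be set once the
 * host controller itself is enabled, hence this is done from the
 * POST_CHANGE notification.
 */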
static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status status)
{
        /* Cannot enable ICE until after HC enable */
        if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
                u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

                hce |= CRYPTO_GENERAL_ENABLE;
                ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
        }

        return 0;
}

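/*
 * Read back PA_LOCAL_TX_LCC_ENABLE and, if set, disable the host's TX line
 * control commands (LCC) before link startup.
 */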
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
        u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
        u32 lcc_enable = 0;

        ufshcd_dme_get(hba, attr, &lcc_enable);
        if (lcc_enable)
                ufshcd_disable_host_tx_lcc(hba);

        return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
                                         enum ufs_notify_change_status status)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                err = ufs_intel_disable_lcc(hba);
                break;
        case POST_CHANGE:
                break;
        default:
                break;
        }

        return err;
}

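/*
 * Reprogram the power mode with the requested number of RX/TX lanes while
 * keeping all other power parameters unchanged.
 */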
static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
        struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
        int ret;

        pwr_info.lane_rx = lanes;
        pwr_info.lane_tx = lanes;
        ret = ufshcd_config_pwr_mode(hba, &pwr_info);
        if (ret)
                dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
                        __func__, lanes, ret);
        return ret;
}

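/*
 * LKF needs both lanes active before switching to a high-speed mode. After
 * the switch, the driver sleeps briefly and issues a PA_GRANULARITY peer
 * read, presumably to let the link settle; only the read's error status is
 * used, not its result.
 */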
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
                                enum ufs_notify_change_status status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                if (ufshcd_is_hs_mode(dev_max_params) &&
                    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
                        ufs_intel_set_lanes(hba, 2);
                memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
                break;
        case POST_CHANGE:
                if (ufshcd_is_hs_mode(dev_req_params)) {
                        u32 peer_granularity;

                        usleep_range(1000, 1250);
                        err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
                                                  &peer_granularity);
                }
                break;
        default:
                break;
        }

        return err;
}

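/*
 * LKF device quirk: when the host and device report the same
 * PA_GRANULARITY, program the device's PA_TACTIVATE to the host's value
 * plus 2, presumably to keep the device's activate time safely above the
 * host's.
 */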
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
        u32 granularity, peer_granularity;
        u32 pa_tactivate, peer_pa_tactivate;
        int ret;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
        if (ret)
                goto out;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
        if (ret)
                goto out;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
        if (ret)
                goto out;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
        if (ret)
                goto out;

        if (granularity == peer_granularity) {
                u32 new_peer_pa_tactivate = pa_tactivate + 2;

                ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
        }
out:
        return ret;
}

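/*
 * LTR (Latency Tolerance Reporting) registers in the Intel host MMIO space:
 * bit 15 requests a tolerance, bits 11:10 select the scale (1 us or 32 us)
 * and bits 9:0 hold the value.
 */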
#define INTEL_ACTIVELTR         0x804
#define INTEL_IDLELTR           0x808

#define INTEL_LTR_REQ           BIT(15)
#define INTEL_LTR_SCALE_MASK    GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US     (2 << 10)
#define INTEL_LTR_SCALE_32US    (3 << 10)
#define INTEL_LTR_VALUE_MASK    GENMASK(9, 0)

static void intel_cache_ltr(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
        host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

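/*
 * Encode a PM QoS latency tolerance into the LTR registers: a value that
 * fits in 10 bits uses the 1 us scale; anything larger is divided by 32
 * (shifted right by 5) and uses the 32 us scale, saturating at the 10-bit
 * maximum.
 */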
static void intel_ltr_set(struct device *dev, s32 val)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct intel_host *host = ufshcd_get_variant(hba);
        u32 ltr;

        pm_runtime_get_sync(dev);

        /*
         * Program the latency tolerance (LTR) according to what the PM QoS
         * layer has asked for, or disable it if we were passed a negative
         * value or PM_QOS_LATENCY_ANY.
         */
        ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

        if (val == PM_QOS_LATENCY_ANY || val < 0) {
                ltr &= ~INTEL_LTR_REQ;
        } else {
                ltr |= INTEL_LTR_REQ;
                ltr &= ~INTEL_LTR_SCALE_MASK;
                ltr &= ~INTEL_LTR_VALUE_MASK;

                if (val > INTEL_LTR_VALUE_MASK) {
                        val >>= 5;
                        if (val > INTEL_LTR_VALUE_MASK)
                                val = INTEL_LTR_VALUE_MASK;
                        ltr |= INTEL_LTR_SCALE_32US | val;
                } else {
                        ltr |= INTEL_LTR_SCALE_1US | val;
                }
        }

        if (ltr == host->active_ltr)
                goto out;

        writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
        writel(ltr, hba->mmio_base + INTEL_IDLELTR);

        /* Cache the values into the intel_host structure */
        intel_cache_ltr(hba);
out:
        pm_runtime_put(dev);
}

static void intel_ltr_expose(struct device *dev)
{
        dev->power.set_latency_tolerance = intel_ltr_set;
        dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
        dev_pm_qos_hide_latency_tolerance(dev);
        dev->power.set_latency_tolerance = NULL;
}

static void intel_add_debugfs(struct ufs_hba *hba)
{
        struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
        struct intel_host *host = ufshcd_get_variant(hba);

        intel_cache_ltr(hba);

        host->debugfs_root = dir;
        debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
        debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        debugfs_remove_recursive(host->debugfs_root);
}

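/*
 * Reset the device, preferring the ACPI _DSM reset function when the
 * platform provides one and falling back to toggling the reset GPIO.
 */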
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        if (host->dsm_fns & BIT(INTEL_DSM_RESET)) {
                u32 result = 0;
                int err;

                err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
                if (!err && !result)
                        err = -EIO;
                if (err)
                        dev_err(hba->dev, "%s: DSM error %d result %u\n",
                                __func__, err, result);
                return err;
        }

        if (!host->reset_gpio)
                return -EOPNOTSUPP;

        gpiod_set_value_cansleep(host->reset_gpio, 1);
        usleep_range(10, 15);

        gpiod_set_value_cansleep(host->reset_gpio, 0);
        usleep_range(10, 15);

        return 0;
}

static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
        /* GPIO in _DSD has active low setting */
        return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

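/*
 * Common Intel init: enable runtime autosuspend, enumerate the _DSM
 * functions and, if a reset mechanism (_DSM or GPIO) is available for a
 * variant that wires up ->device_reset, advertise DeepSleep support.
 */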
static int ufs_intel_common_init(struct ufs_hba *hba)
{
        struct intel_host *host;

        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
        ufshcd_set_variant(hba, host);
        intel_dsm_init(host, hba->dev);
        if (host->dsm_fns & BIT(INTEL_DSM_RESET)) {
                if (hba->vops->device_reset)
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
        } else {
                if (hba->vops->device_reset)
                        host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
                if (IS_ERR(host->reset_gpio)) {
                        dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
                                __func__, PTR_ERR(host->reset_gpio));
                        host->reset_gpio = NULL;
                }
                if (host->reset_gpio) {
                        gpiod_set_value_cansleep(host->reset_gpio, 0);
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
                }
        }
        intel_ltr_expose(hba->dev);
        intel_add_debugfs(hba);
        return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
        intel_remove_debugfs(hba);
        intel_ltr_hide(hba->dev);
}

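/*
 * On resume, exit hibern8 manually. If that fails, mark the link off so
 * that the core driver performs a full reset and restore.
 */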
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
        if (ufshcd_is_link_hibern8(hba)) {
                int ret = ufshcd_uic_hibern8_exit(hba);

                if (!ret) {
                        ufshcd_set_link_active(hba);
                } else {
                        dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                                __func__, ret);
                        /*
                         * Force reset and restore. Any other actions can lead
                         * to an unrecoverable state.
                         */
                        ufshcd_set_link_off(hba);
                }
        }

        return 0;
}

static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        return ufs_intel_common_init(hba);
}

static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
        /* LKF always needs a full reset, so set PM accordingly */
        if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
                hba->spm_lvl = UFS_PM_LVL_6;
                hba->rpm_lvl = UFS_PM_LVL_6;
        } else {
                hba->spm_lvl = UFS_PM_LVL_5;
                hba->rpm_lvl = UFS_PM_LVL_5;
        }
}

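/*
 * LKF needs a longer NOP OUT timeout, has broken auto-hibern8 and supports
 * inline crypto. The late_init hook then selects the deepest PM levels that
 * the available reset capability allows.
 */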
static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
        struct ufs_host *ufs_host;
        int err;

        hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        err = ufs_intel_common_init(hba);
        ufs_host = ufshcd_get_variant(hba);
        ufs_host->late_init = ufs_intel_lkf_late_init;
        return err;
}

static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_ehl_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_lkf_init,
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .pwr_change_notify      = ufs_intel_lkf_pwr_change_notify,
        .apply_dev_quirks       = ufs_intel_lkf_apply_dev_quirks,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
};

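/*
 * After loading a hibernation image, the controller state set up by the
 * boot kernel cannot be relied upon, so force a full reset and restore.
 */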
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        /* Force a full reset and restore */
        ufshcd_set_link_off(hba);

        return ufshcd_system_resume(dev);
}
#endif

/**
 * ufshcd_pci_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
        ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}

/**
 * ufshcd_pci_remove - de-allocate the SCSI host and host memory space
 *              data structures
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
        struct ufs_hba *hba = pci_get_drvdata(pdev);

        pm_runtime_forbid(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);
        ufshcd_remove(hba);
        ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ufs_host *ufs_host;
        struct ufs_hba *hba;
        void __iomem *mmio_base;
        int err;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pcim_enable_device failed\n");
                return err;
        }

        pci_set_master(pdev);

        err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
        if (err < 0) {
                dev_err(&pdev->dev, "request and iomap failed\n");
                return err;
        }

        mmio_base = pcim_iomap_table(pdev)[0];

        err = ufshcd_alloc_host(&pdev->dev, &hba);
        if (err) {
                dev_err(&pdev->dev, "Allocation failed\n");
                return err;
        }

        pci_set_drvdata(pdev, hba);

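        /*
         * The variant ops come from the PCI ID table's driver_data (NULL
         * for the plain Samsung device ID below).
         */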
        hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

        err = ufshcd_init(hba, mmio_base, pdev->irq);
        if (err) {
                dev_err(&pdev->dev, "Initialization failed\n");
                ufshcd_dealloc_host(hba);
                return err;
        }

        ufs_host = ufshcd_get_variant(hba);
        if (ufs_host && ufs_host->late_init)
                ufs_host->late_init(hba);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);

        return 0;
}

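/*
 * System-wide PM reuses the suspend/resume handlers for freeze/thaw and
 * poweroff; only restore (resume from hibernation) needs the full-reset
 * variant above.
 */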
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
        .suspend        = ufshcd_system_suspend,
        .resume         = ufshcd_system_resume,
        .freeze         = ufshcd_system_suspend,
        .thaw           = ufshcd_system_resume,
        .poweroff       = ufshcd_system_suspend,
        .restore        = ufshcd_pci_restore,
        .prepare        = ufshcd_suspend_prepare,
        .complete       = ufshcd_resume_complete,
#endif
};

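/*
 * The Intel entries select variant ops by platform: the CNL, EHL and LKF
 * suffixes follow Intel platform code names (Cannon Lake, Elkhart Lake,
 * Lakefield).
 */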
static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
        { }     /* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
        .name = UFSHCD,
        .id_table = ufshcd_pci_tbl,
        .probe = ufshcd_pci_probe,
        .remove = ufshcd_pci_remove,
        .shutdown = ufshcd_pci_shutdown,
        .driver = {
                .pm = &ufshcd_pci_pm_ops
        },
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);