linux/drivers/scsi/ufs/ufs-qcom.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
#include <linux/devfreq.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN   \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
        TSTBUS_UAWM,
        TSTBUS_UARM,
        TSTBUS_TXUC,
        TSTBUS_RXUC,
        TSTBUS_DFC,
        TSTBUS_TRLUT,
        TSTBUS_TMRLUT,
        TSTBUS_OCSC,
        TSTBUS_UTP_HCI,
        TSTBUS_COMBINED,
        TSTBUS_WRAPPER,
        TSTBUS_UNIPRO,
        TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles);

static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
        return container_of(rcd, struct ufs_qcom_host, rcdev);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
                                       const char *prefix, void *priv)
{
        ufshcd_dump_regs(hba, offset, len * 4, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
        int err = 0;

        err = ufshcd_dme_get(hba,
                        UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
                                __func__, err);

        return err;
}

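/*
 * ufs_qcom_host_clk_get - look up a clock by name for this host
 *
 * Wraps devm_clk_get(). When @optional is true, an absent clock (-ENOENT)
 * is not treated as an error and *clk_out is set to NULL. Errors other
 * than -EPROBE_DEFER are logged.
 */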
static int ufs_qcom_host_clk_get(struct device *dev,
                const char *name, struct clk **clk_out, bool optional)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (!IS_ERR(clk)) {
                *clk_out = clk;
                return 0;
        }

        err = PTR_ERR(clk);

        if (optional && err == -ENOENT) {
                *clk_out = NULL;
                return 0;
        }

        if (err != -EPROBE_DEFER)
                dev_err(dev, "failed to get %s err %d\n", name, err);

        return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
                const char *name, struct clk *clk)
{
        int err = 0;

        err = clk_prepare_enable(clk);
        if (err)
                dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

        return err;
}

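/*
 * The lane symbol clocks are sourced from the PHY PLL, so they are only
 * enabled after the PHY has been initialized (see
 * ufs_qcom_hce_enable_notify()) and are disabled before the PHY is
 * powered down (see ufs_qcom_suspend()).
 */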
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
        if (!host->is_lane_clks_enabled)
                return;

        clk_disable_unprepare(host->tx_l1_sync_clk);
        clk_disable_unprepare(host->tx_l0_sync_clk);
        clk_disable_unprepare(host->rx_l1_sync_clk);
        clk_disable_unprepare(host->rx_l0_sync_clk);

        host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        if (host->is_lane_clks_enabled)
                return 0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
                host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
                host->tx_l0_sync_clk);
        if (err)
                goto disable_rx_l0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
                        host->rx_l1_sync_clk);
        if (err)
                goto disable_tx_l0;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
                        host->tx_l1_sync_clk);
        if (err)
                goto disable_rx_l1;

        host->is_lane_clks_enabled = true;
        goto out;

disable_rx_l1:
        clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
        clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
        clk_disable_unprepare(host->rx_l0_sync_clk);
out:
        return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        if (has_acpi_companion(dev))
                return 0;

        err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
                                        &host->rx_l0_sync_clk, false);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
                                        &host->tx_l0_sync_clk, false);
        if (err)
                goto out;

        /* In case of single lane per direction, don't read lane1 clocks */
        if (host->hba->lanes_per_direction > 1) {
                err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
                        &host->rx_l1_sync_clk, false);
                if (err)
                        goto out;

                err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
                        &host->tx_l1_sync_clk, true);
        }
out:
        return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
        u32 tx_lanes;

        return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}

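/*
 * ufs_qcom_check_hibern8 - verify that the PHY TX FSM reached HIBERN8
 *
 * Polls MPHY_TX_FSM_STATE for up to HBRN8_POLL_TOUT_MS. Returns 0 when
 * the FSM is in TX_FSM_HIBERN8, a negative errno on DME access failure,
 * or the unexpected (positive) FSM state otherwise.
 */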
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
        int err;
        u32 tx_fsm_val = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

        do {
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);
                if (err || tx_fsm_val == TX_FSM_HIBERN8)
                        break;

                /* sleep for max. 200us */
                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        /*
         * We might have been scheduled out for a long time during the
         * polling above, so check the state one final time.
         */
        if (time_after(jiffies, timeout))
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);

        if (err) {
                dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
                                __func__, err);
        } else if (tx_fsm_val != TX_FSM_HIBERN8) {
                err = tx_fsm_val;
                dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
                                __func__, err);
        }

        return err;
}

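/*
 * ufs_qcom_select_unipro_mode - select the QUniPro interface when the host
 * advertises the capability, the legacy UniPro interface otherwise, by
 * toggling QUNIPRO_SEL in REG_UFS_CFG1.
 */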
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
        ufshcd_rmwl(host->hba, QUNIPRO_SEL,
                   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
                   REG_UFS_CFG1);
        /* make sure above configuration is applied before we return */
        mb();
}

/*
 * ufs_qcom_host_reset - reset host controller and PHY
 */
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
        int ret = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        bool reenable_intr = false;

        if (!host->core_reset) {
                dev_warn(hba->dev, "%s: reset control not set\n", __func__);
                goto out;
        }

        reenable_intr = hba->is_irq_enabled;
        disable_irq(hba->irq);
        hba->is_irq_enabled = false;

        ret = reset_control_assert(host->core_reset);
        if (ret) {
                dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
                                 __func__, ret);
                goto out;
        }

        /*
         * The hardware requires a delay of at least 3-4 sleep clock
         * (32.768 kHz) cycles between reset assert and deassert, which
         * comes to ~122us (4/32768). To be on the safe side, add a
         * 200us delay.
         */
        usleep_range(200, 210);

        ret = reset_control_deassert(host->core_reset);
        if (ret)
                dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
                                 __func__, ret);

        usleep_range(1000, 1100);

        if (reenable_intr) {
                enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }

out:
        return ret;
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;
        bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B);

        /* Reset UFS Host Controller and PHY */
        ret = ufs_qcom_host_reset(hba);
        if (ret)
                dev_warn(hba->dev, "%s: host reset returned %d\n",
                                  __func__, ret);

        if (is_rate_B)
                phy_set_mode(phy, PHY_MODE_UFS_HS_B);

        /* phy initialization - calibrate the phy */
        ret = phy_init(phy);
        if (ret) {
                dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
                        __func__, ret);
                goto out;
        }

        /* power on phy - start serdes and phy's power and clocks */
        ret = phy_power_on(phy);
        if (ret) {
                dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
                        __func__, ret);
                goto out_disable_phy;
        }

        ufs_qcom_select_unipro_mode(host);

        return 0;

out_disable_phy:
        phy_exit(phy);
out:
        return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default;
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
        ufshcd_writel(hba,
                ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
                REG_UFS_CFG2);

        /* Ensure that HW clock gating is enabled before next operations */
        mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
                                      enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_power_up_sequence(hba);
                /*
                 * The PHY PLL output is the source of tx/rx lane symbol
                 * clocks, hence, enable the lane clocks only after PHY
                 * is initialized.
                 */
                err = ufs_qcom_enable_lane_clks(host);
                break;
        case POST_CHANGE:
                /* check if UFS PHY moved from DISABLED to HIBERN8 */
                err = ufs_qcom_check_hibern8(hba);
                ufs_qcom_enable_hw_clk_gating(hba);
                ufs_qcom_ice_enable(host);
                break;
        default:
                dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
                err = -EINVAL;
                break;
        }
        return err;
}

/*
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                               u32 hs, u32 rate, bool update_link_startup_timer)
{
        int ret = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_clk_info *clki;
        u32 core_clk_period_in_ns;
        u32 tx_clk_cycles_per_us = 0;
        unsigned long core_clk_rate = 0;
        u32 core_clk_cycles_per_us = 0;

        static u32 pwm_fr_table[][2] = {
                {UFS_PWM_G1, 0x1},
                {UFS_PWM_G2, 0x1},
                {UFS_PWM_G3, 0x1},
                {UFS_PWM_G4, 0x1},
        };

        static u32 hs_fr_table_rA[][2] = {
                {UFS_HS_G1, 0x1F},
                {UFS_HS_G2, 0x3e},
                {UFS_HS_G3, 0x7D},
        };

        static u32 hs_fr_table_rB[][2] = {
                {UFS_HS_G1, 0x24},
                {UFS_HS_G2, 0x49},
                {UFS_HS_G3, 0x92},
        };

        /*
         * The QUniPro controller does not use the following registers:
         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
         * UFS_REG_PA_LINK_STARTUP_TIMER.
         * However, the UTP controller uses the SYS1CLK_1US_REG register
         * for its Interrupt Aggregation logic.
         */
        if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
                goto out;

        if (gear == 0) {
                dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
                goto out_error;
        }

        list_for_each_entry(clki, &hba->clk_list_head, list) {
                if (!strcmp(clki->name, "core_clk"))
                        core_clk_rate = clk_get_rate(clki->clk);
        }

        /* If frequency is smaller than 1MHz, set to 1MHz */
        if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
                core_clk_rate = DEFAULT_CLK_RATE_HZ;

        core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
        if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
                ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (ufs_qcom_cap_qunipro(host))
                goto out;

        core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
        core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
        core_clk_period_in_ns &= MASK_CLK_NS_REG;

        switch (hs) {
        case FASTAUTO_MODE:
        case FAST_MODE:
                if (rate == PA_HS_MODE_A) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rA));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
                } else if (rate == PA_HS_MODE_B) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rB));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
                } else {
                        dev_err(hba->dev, "%s: invalid rate = %d\n",
                                __func__, rate);
                        goto out_error;
                }
                break;
        case SLOWAUTO_MODE:
        case SLOW_MODE:
                if (gear > ARRAY_SIZE(pwm_fr_table)) {
                        dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(pwm_fr_table));
                        goto out_error;
                }
                tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
                break;
        case UNCHANGED:
        default:
                dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
                goto out_error;
        }

        if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
            (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
                /* the two fields of this register must be written at once */
                ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
                              REG_UFS_TX_SYMBOL_CLK_NS_US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (update_link_startup_timer) {
                ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
                              REG_UFS_PA_LINK_STARTUP_TIMER);
                /*
                 * make sure that this configuration is applied before
                 * we return
                 */
                mb();
        }
        goto out;

out_error:
        ret = -EINVAL;
out:
        return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                                        enum ufs_notify_change_status status)
{
        int err = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        switch (status) {
        case PRE_CHANGE:
                if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
                                        0, true)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        err = -EINVAL;
                        goto out;
                }

                if (ufs_qcom_cap_qunipro(host))
                        /*
                         * set unipro core clock cycles to 150 & clear clock
                         * divider
                         */
                        err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
                                                                          150);

                /*
                 * Some UFS devices (and possibly the host) have issues if
                 * LCC is enabled, so set PA_Local_TX_LCC_Enable to 0 before
                 * link startup to make sure that both host and device TX
                 * LCC are disabled once link startup is completed.
                 */
                if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
                        err = ufshcd_disable_host_tx_lcc(hba);

                break;
        case POST_CHANGE:
                ufs_qcom_link_startup_post_change(hba);
                break;
        default:
                break;
        }

out:
        return err;
}

static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        /* reset gpio is optional */
        if (!host->device_reset)
                return;

        gpiod_set_value_cansleep(host->device_reset, asserted);
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;

        if (ufs_qcom_is_link_off(hba)) {
                /*
                 * Disable the tx/rx lane symbol clocks before the PHY is
                 * powered down, as the PLL source should only be disabled
                 * after its downstream clocks are disabled.
                 */
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);

                /* reset the connected UFS device during power down */
                ufs_qcom_device_reset_ctrl(hba, true);

        } else if (!ufs_qcom_is_link_active(hba)) {
                ufs_qcom_disable_lane_clks(host);
        }

        return 0;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int err;

        if (ufs_qcom_is_link_off(hba)) {
                err = phy_power_on(phy);
                if (err) {
                        dev_err(hba->dev, "%s: failed PHY power on: %d\n",
                                __func__, err);
                        return err;
                }

                err = ufs_qcom_enable_lane_clks(host);
                if (err)
                        return err;

        } else if (!ufs_qcom_is_link_active(hba)) {
                err = ufs_qcom_enable_lane_clks(host);
                if (err)
                        return err;
        }

        err = ufs_qcom_ice_resume(host);
        if (err)
                return err;

        hba->is_sys_suspended = false;
        return 0;
}

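/*
 * ufs_qcom_dev_ref_clk_ctrl - gate/ungate the device reference clock
 *
 * Toggles the ref_clk enable bit and, as the comments below explain,
 * keeps the clock running for bRefClkGatingWaitTime after a hibern8
 * enter and gives it 1us to stabilize before a hibern8 exit.
 */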
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
        if (host->dev_ref_clk_ctrl_mmio &&
            (enable ^ host->is_dev_ref_clk_enabled)) {
                u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

                if (enable)
                        temp |= host->dev_ref_clk_en_mask;
                else
                        temp &= ~host->dev_ref_clk_en_mask;

                /*
                 * If we are here to disable this clock, it might be
                 * immediately after entering hibern8, in which case we need
                 * to make sure that the device ref_clk stays active for a
                 * specific time after the hibern8 enter.
                 */
                if (!enable) {
                        unsigned long gating_wait;

                        gating_wait = host->hba->dev_info.clk_gating_wait_us;
                        if (!gating_wait) {
                                udelay(1);
                        } else {
                                /*
                                 * bRefClkGatingWaitTime defines the minimum
                                 * time for which the reference clock is
                                 * required by device during transition from
                                 * HS-MODE to LS-MODE or HIBERN8 state. Give it
                                 * more delay to be on the safe side.
                                 */
                                gating_wait += 10;
                                usleep_range(gating_wait, gating_wait + 10);
                        }
                }

                writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

                /* ensure that ref_clk is enabled/disabled before we return */
                wmb();

                /*
                 * If we call hibern8 exit after this, we need to make sure that
                 * device ref_clk is stable for at least 1us before the hibern8
                 * exit command.
                 */
                if (enable)
                        udelay(1);

                host->is_dev_ref_clk_enabled = enable;
        }
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                                enum ufs_notify_change_status status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_dev_params ufs_qcom_cap;
        int ret = 0;

        if (!dev_req_params) {
                pr_err("%s: incoming dev_req_params is NULL\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        switch (status) {
        case PRE_CHANGE:
                ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
                ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;

                if (host->hw_ver.major == 0x1) {
                        /*
                         * HS-G3 operations may not reliably work on legacy QCOM
                         * UFS host controller hardware even though capability
                         * exchange during the link startup phase may end up
                         * negotiating the maximum supported gear as G3.
                         * Hence downgrade the maximum supported gear to HS-G2.
                         */
                        if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
                        if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
                }

                ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
                                               dev_max_params,
                                               dev_req_params);
                if (ret) {
                        pr_err("%s: failed to determine capabilities\n",
                                        __func__);
                        goto out;
                }

                /* enable the device ref clock before changing to HS mode */
                if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
                        ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, true);

                if (host->hw_ver.major >= 0x4) {
                        ufshcd_dme_configure_adapt(hba,
                                                dev_req_params->gear_tx,
                                                PA_INITIAL_ADAPT);
                }
                break;
        case POST_CHANGE:
                if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
                                        dev_req_params->pwr_rx,
                                        dev_req_params->hs_rate, false)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        /*
                         * we return the error code at the end of the routine,
                         * but continue to configure UFS_PHY_TX_LANE_ENABLE
                         * and bus voting as usual
                         */
                        ret = -EINVAL;
                }

                /* cache the power mode parameters to use internally */
                memcpy(&host->dev_req_params,
                                dev_req_params, sizeof(*dev_req_params));

                /* disable the device ref clock if entered PWM mode */
                if (ufshcd_is_hs_mode(&hba->pwr_info) &&
                        !ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, false);
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
        int err;
        u32 pa_vs_config_reg1;

        err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                             &pa_vs_config_reg1);
        if (err)
                goto out;

        /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
        err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                            (pa_vs_config_reg1 | (1 << 12)));

out:
        return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
        int err = 0;

        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
                err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

        if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
                hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;

        return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x1)
                return ufshci_version(1, 1);
        else
                return ufshci_version(2, 0);
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller may exhibit some non-standard behaviours
 * (quirks) relative to what the UFSHCI specification requires. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x01) {
                hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                            | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
                            | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

                if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
                        hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

                hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
        }

        if (host->hw_ver.major == 0x2) {
                hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

                if (!ufs_qcom_cap_qunipro(host))
                        /* Legacy UniPro mode still needs the following quirks */
                        hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                                | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
                                | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
        }
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
        hba->caps |= UFSHCD_CAP_CLK_SCALING;
        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
        hba->caps |= UFSHCD_CAP_WB_EN;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;

        if (host->hw_ver.major >= 0x2) {
                host->caps = UFS_QCOM_CAP_QUNIPRO |
                             UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
        }
}

/**
 * ufs_qcom_setup_clocks - enable/disable the clocks
 * @hba: host controller instance
 * @on: If true, enable clocks, else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
                                 enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err = 0;

        /*
         * In case ufs_qcom_init() is not yet done, simply ignore.
         * This ufs_qcom_setup_clocks() will be called again from
         * ufs_qcom_init() once initialization is done.
         */
        if (!host)
                return 0;

        switch (status) {
        case PRE_CHANGE:
                if (!on) {
                        if (!ufs_qcom_is_link_active(hba)) {
                                /* disable device ref_clk */
                                ufs_qcom_dev_ref_clk_ctrl(host, false);
                        }
                }
                break;
        case POST_CHANGE:
                if (on) {
                        /* enable the device ref clock for HS mode */
                        if (ufshcd_is_hs_mode(&hba->pwr_info))
                                ufs_qcom_dev_ref_clk_ctrl(host, true);
                }
                break;
        }

        return err;
}

static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
        struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

        /* Currently this code only knows about a single reset. */
        WARN_ON(id);
        ufs_qcom_assert_reset(host->hba);
        /* provide 1ms delay to let the reset pulse propagate. */
        usleep_range(1000, 1100);
        return 0;
}

static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
        struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

        /* Currently this code only knows about a single reset. */
        WARN_ON(id);
        ufs_qcom_deassert_reset(host->hba);

        /*
         * After reset deassertion, the PHY needs all of its reference
         * clocks, voltage and current to settle before the serdes can be
         * started.
         */
        usleep_range(1000, 1100);
        return 0;
}

static const struct reset_control_ops ufs_qcom_reset_ops = {
        .assert = ufs_qcom_reset_assert,
        .deassert = ufs_qcom_reset_deassert,
};

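/*
 * With the reset controller registered in ufs_qcom_init() below, the UFS
 * PHY node in the device tree can reference this host-provided reset. An
 * illustrative, SoC-dependent snippet (node names are examples only):
 *
 *      ufs_mem_phy: phy@1d87000 {
 *              ...
 *              resets = <&ufs_mem_hc 0>;
 *      };
 */
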
#define ANDROID_BOOT_DEV_MAX    30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
        strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
        return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif
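
/*
 * If "androidboot.bootdevice=<device name>" is passed on the kernel
 * command line, ufs_qcom_init() below bails out with -ENODEV on any
 * controller whose device name does not match it. An illustrative value
 * (the exact instance name depends on the SoC's register address):
 *
 *      androidboot.bootdevice=1d84000.ufshc
 */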

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling its
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error on PHY power-up
 * failure and zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
        int err;
        struct device *dev = hba->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct ufs_qcom_host *host;
        struct resource *res;
        struct ufs_clk_info *clki;

        if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
                return -ENODEV;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
                goto out;
        }

        /* Make a two-way bind between the qcom host and the hba */
        host->hba = hba;
        ufshcd_set_variant(hba, host);

        /* Set up the reset control of HCI */
        host->core_reset = devm_reset_control_get(hba->dev, "rst");
        if (IS_ERR(host->core_reset)) {
                err = PTR_ERR(host->core_reset);
                dev_warn(dev, "Failed to get reset control %d\n", err);
                host->core_reset = NULL;
                err = 0;
        }

        /* Fire up the reset controller. Failure here is non-fatal. */
        host->rcdev.of_node = dev->of_node;
        host->rcdev.ops = &ufs_qcom_reset_ops;
        host->rcdev.owner = dev->driver->owner;
        host->rcdev.nr_resets = 1;
        err = devm_reset_controller_register(dev, &host->rcdev);
        if (err) {
                dev_warn(dev, "Failed to register reset controller\n");
                err = 0;
        }

        /*
         * Voting/devoting the device ref_clk source is time consuming,
         * hence skip devoting it during aggressive clock gating. This
         * clock will still be gated off during runtime suspend.
         */
        host->generic_phy = devm_phy_get(dev, "ufsphy");

        if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver might be probed before the phy driver.
                 * In that case we need to return -EPROBE_DEFER.
                 */
                err = -EPROBE_DEFER;
                dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
                        __func__, err);
                goto out_variant_clear;
        } else if (IS_ERR(host->generic_phy)) {
                if (has_acpi_companion(dev)) {
                        host->generic_phy = NULL;
                } else {
                        err = PTR_ERR(host->generic_phy);
                        dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
                        goto out_variant_clear;
                }
        }

        host->device_reset = devm_gpiod_get_optional(dev, "reset",
                                                     GPIOD_OUT_HIGH);
        if (IS_ERR(host->device_reset)) {
                err = PTR_ERR(host->device_reset);
                if (err != -EPROBE_DEFER)
                        dev_err(dev, "failed to acquire reset gpio: %d\n", err);
                goto out_variant_clear;
        }

        ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
                &host->hw_ver.minor, &host->hw_ver.step);

        /*
         * For newer controllers, the device reference clock control bit has
         * moved into the UFS controller's own register address space.
         */
        if (host->hw_ver.major >= 0x02) {
                host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
                host->dev_ref_clk_en_mask = BIT(26);
        } else {
                /* "dev_ref_clk_ctrl_mem" is an optional resource */
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   "dev_ref_clk_ctrl_mem");
                if (res) {
                        host->dev_ref_clk_ctrl_mmio =
                                        devm_ioremap_resource(dev, res);
                        if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
                                host->dev_ref_clk_ctrl_mmio = NULL;
                        host->dev_ref_clk_en_mask = BIT(5);
                }
        }

        list_for_each_entry(clki, &hba->clk_list_head, list) {
                if (!strcmp(clki->name, "core_clk_unipro"))
                        clki->keep_link_active = true;
        }

        err = ufs_qcom_init_lane_clks(host);
        if (err)
                goto out_variant_clear;

        ufs_qcom_set_caps(hba);
        ufs_qcom_advertise_quirks(hba);

        err = ufs_qcom_ice_init(host);
        if (err)
                goto out_variant_clear;

        ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

        if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
                ufs_qcom_hosts[hba->dev->id] = host;

        host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
        ufs_qcom_get_default_testbus_cfg(host);
        err = ufs_qcom_testbus_config(host);
        if (err) {
                dev_warn(dev, "%s: failed to configure the testbus %d\n",
                                __func__, err);
                err = 0;
        }

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        ufs_qcom_disable_lane_clks(host);
        phy_power_off(host->generic_phy);
        phy_exit(host->generic_phy);
}

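/*
 * ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div - program the UniPro core
 * clock rate and disable the clock divider
 *
 * Writes @clk_cycles (core clock cycles per 1us) into the
 * MAX_CORE_CLK_1US_CYCLES field of DME_VS_CORE_CLK_CTRL and clears
 * CORE_CLK_DIV_EN in the same attribute. Callers use 150 cycles when
 * scaling up and 75 when scaling down.
 */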
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles)
{
        int err;
        u32 core_clk_ctrl_reg;

        if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
                return -EINVAL;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);
        if (err)
                goto out;

        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
        core_clk_ctrl_reg |= clk_cycles;

        /* Clear CORE_CLK_DIV_EN */
        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

        err = ufshcd_dme_set(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            core_clk_ctrl_reg);
out:
        return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
        /* nothing to do as of now */
        return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 150 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        u32 core_clk_ctrl_reg;

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);

        /* make sure CORE_CLK_DIV_EN is cleared */
        if (!err &&
            (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
                core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
                err = ufshcd_dme_set(hba,
                                    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                                    core_clk_ctrl_reg);
        }

        return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 75 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                bool scale_up, enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
        int err = 0;

        if (status == PRE_CHANGE) {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_pre_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_pre_change(hba);
        } else {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_post_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_post_change(hba);

                if (err || !dev_req_params)
                        goto out;

                ufs_qcom_cfg_timers(hba,
                                    dev_req_params->gear_rx,
                                    dev_req_params->pwr_rx,
                                    dev_req_params->hs_rate,
                                    false);
        }

out:
        return err;
}

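/*
 * ufs_qcom_print_hw_debug_reg_all - dump the vendor debug register regions
 *
 * Feeds each debug region (OCSC, the UTP RAMs, UAWM/UARM, TX/RX UniPro
 * cores, DFC, TRLUT and TMRLUT) through @print_fn. The UTP RAM regions
 * are only readable while UTP_DBG_RAMS_EN is set, so that bit is toggled
 * around their dumps.
 */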
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
                void *priv, void (*print_fn)(struct ufs_hba *hba,
                int offset, int num_regs, const char *str, void *priv))
{
        u32 reg;
        struct ufs_qcom_host *host;

        if (unlikely(!hba)) {
                pr_err("%s: hba is NULL\n", __func__);
                return;
        }
        if (unlikely(!print_fn)) {
                dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
                return;
        }

        host = ufshcd_get_variant(hba);
        if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
                return;

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
        print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

        reg = ufshcd_readl(hba, REG_UFS_CFG1);
        reg |= UTP_DBG_RAMS_EN;
        ufshcd_writel(hba, reg, REG_UFS_CFG1);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
        print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
        print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
        print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

        /* clear bit 17 - UTP_DBG_RAMS_EN */
        ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
        print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
        print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
        print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
        print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
        print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}

static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
        if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
                                UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
        } else {
                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
        }
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
        /* provide a legal default configuration */
        host->testbus.select_major = TSTBUS_UNIPRO;
        host->testbus.select_minor = 37;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
        if (host->testbus.select_major >= TSTBUS_MAX) {
                dev_err(host->hba->dev,
                        "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
                        __func__, host->testbus.select_major);
                return false;
        }

        return true;
}

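/*
 * ufs_qcom_testbus_config - route the selected test bus signals
 *
 * Programs the major selector into TEST_BUS_SEL of REG_UFS_CFG1 and the
 * per-module minor selector into the matching UFS_TEST_BUS_CTRL_n (or
 * UFS_UNIPRO_CFG) field, then enables the test bus.
 */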
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
        int reg;
        int offset;
        u32 mask = TEST_BUS_SUB_SEL_MASK;

        if (!host)
                return -EINVAL;

        if (!ufs_qcom_testbus_cfg_is_ok(host))
                return -EPERM;

        switch (host->testbus.select_major) {
        case TSTBUS_UAWM:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 24;
                break;
        case TSTBUS_UARM:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 16;
                break;
        case TSTBUS_TXUC:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 8;
                break;
        case TSTBUS_RXUC:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 0;
                break;
        case TSTBUS_DFC:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 24;
                break;
        case TSTBUS_TRLUT:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 16;
                break;
        case TSTBUS_TMRLUT:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 8;
                break;
        case TSTBUS_OCSC:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 0;
                break;
        case TSTBUS_WRAPPER:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 16;
                break;
        case TSTBUS_COMBINED:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 8;
                break;
        case TSTBUS_UTP_HCI:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 0;
                break;
        case TSTBUS_UNIPRO:
                reg = UFS_UNIPRO_CFG;
                offset = 20;
                mask = 0xFFF;
                break;
        /*
         * No need for a default case, since
         * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
         * is legal
         */
        }
        mask <<= offset;
        ufshcd_rmwl(host->hba, TEST_BUS_SEL,
                    (u32)host->testbus.select_major << 19,
                    REG_UFS_CFG1);
        ufshcd_rmwl(host->hba, mask,
                    (u32)host->testbus.select_minor << offset,
                    reg);
        ufs_qcom_enable_test_bus(host);
        /*
         * Make sure the test bus configuration is
         * committed before returning.
         */
        mb();

        return 0;
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
                         "HCI Vendor Specific Registers ");

        ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
}

/**
 * ufs_qcom_device_reset() - toggle the (optional) device reset line
 * @hba: per-adapter instance
 *
 * Toggles the (optional) reset line to reset the attached device.
 */
static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        /* reset gpio is optional */
        if (!host->device_reset)
                return -EOPNOTSUPP;

        /*
         * The UFS device shall detect reset pulses of 1us; sleep for 10us
         * to be on the safe side.
         */
        ufs_qcom_device_reset_ctrl(hba, true);
        usleep_range(10, 15);

        ufs_qcom_device_reset_ctrl(hba, false);
        usleep_range(10, 15);

        return 0;
}

#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
                                          struct devfreq_dev_profile *p,
                                          void *data)
{
        struct devfreq_simple_ondemand_data *d;

        if (!data)
                return;

        d = data;
        p->polling_ms = 60;
        d->upthreshold = 70;
        d->downdifferential = 5;
}
#else
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
                                          struct devfreq_dev_profile *p,
                                          void *data)
{
}
#endif

/*
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
        .name                   = "qcom",
        .init                   = ufs_qcom_init,
        .exit                   = ufs_qcom_exit,
        .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
        .clk_scale_notify       = ufs_qcom_clk_scale_notify,
        .setup_clocks           = ufs_qcom_setup_clocks,
        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
        .link_startup_notify    = ufs_qcom_link_startup_notify,
        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
        .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
        .suspend                = ufs_qcom_suspend,
        .resume                 = ufs_qcom_resume,
        .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
        .device_reset           = ufs_qcom_device_reset,
        .config_scaling_param   = ufs_qcom_config_scaling_param,
        .program_key            = ufs_qcom_ice_program_key,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Returns zero for success and non-zero for failure.
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;

        /* Perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
        if (err)
                dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

        return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0.
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
        { .compatible = "qcom,ufshc"},
        {},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
        { "QCOM24A5" },
        { },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif

static const struct dev_pm_ops ufs_qcom_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
};

static struct platform_driver ufs_qcom_pltfrm = {
        .probe  = ufs_qcom_probe,
        .remove = ufs_qcom_remove,
        .shutdown = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-qcom",
                .pm     = &ufs_qcom_pm_ops,
                .of_match_table = of_match_ptr(ufs_qcom_of_match),
                .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
        },
};
module_platform_driver(ufs_qcom_pltfrm);

MODULE_LICENSE("GPL v2");