linux/drivers/scsi/ufs/ufs-qcom.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
   4 */
   5
   6#include <linux/acpi.h>
   7#include <linux/time.h>
   8#include <linux/of.h>
   9#include <linux/platform_device.h>
  10#include <linux/phy/phy.h>
  11#include <linux/gpio/consumer.h>
  12#include <linux/reset-controller.h>
  13#include <linux/devfreq.h>
  14
  15#include "ufshcd.h"
  16#include "ufshcd-pltfrm.h"
  17#include "unipro.h"
  18#include "ufs-qcom.h"
  19#include "ufshci.h"
   20#include "ufs_quirks.h"

   21#define UFS_QCOM_DEFAULT_DBG_PRINT_EN   \
  22        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
  23
  24enum {
  25        TSTBUS_UAWM,
  26        TSTBUS_UARM,
  27        TSTBUS_TXUC,
  28        TSTBUS_RXUC,
  29        TSTBUS_DFC,
  30        TSTBUS_TRLUT,
  31        TSTBUS_TMRLUT,
  32        TSTBUS_OCSC,
  33        TSTBUS_UTP_HCI,
  34        TSTBUS_COMBINED,
  35        TSTBUS_WRAPPER,
  36        TSTBUS_UNIPRO,
  37        TSTBUS_MAX,
  38};
  39
  40static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
  41
  42static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
  43static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
  44                                                       u32 clk_cycles);
  45
  46static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
  47{
  48        return container_of(rcd, struct ufs_qcom_host, rcdev);
  49}
  50
  51static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
  52                                       const char *prefix, void *priv)
  53{
  54        ufshcd_dump_regs(hba, offset, len * 4, prefix);
  55}
  56
  57static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
  58{
  59        int err = 0;
  60
  61        err = ufshcd_dme_get(hba,
  62                        UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
  63        if (err)
  64                dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
  65                                __func__, err);
  66
  67        return err;
  68}
  69
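     /*
      * Look up a clock by name. If @optional is true, a missing clock
      * (-ENOENT) is not an error: *clk_out is set to NULL, which the clk
      * API treats as a dummy clock in clk_prepare_enable() and friends.
      */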
  70static int ufs_qcom_host_clk_get(struct device *dev,
  71                const char *name, struct clk **clk_out, bool optional)
  72{
  73        struct clk *clk;
  74        int err = 0;
  75
  76        clk = devm_clk_get(dev, name);
  77        if (!IS_ERR(clk)) {
  78                *clk_out = clk;
  79                return 0;
  80        }
  81
  82        err = PTR_ERR(clk);
  83
  84        if (optional && err == -ENOENT) {
  85                *clk_out = NULL;
  86                return 0;
  87        }
  88
  89        if (err != -EPROBE_DEFER)
  90                dev_err(dev, "failed to get %s err %d\n", name, err);
  91
  92        return err;
  93}
  94
  95static int ufs_qcom_host_clk_enable(struct device *dev,
  96                const char *name, struct clk *clk)
  97{
  98        int err = 0;
  99
 100        err = clk_prepare_enable(clk);
 101        if (err)
 102                dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
 103
 104        return err;
 105}
 106
 107static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 108{
 109        if (!host->is_lane_clks_enabled)
 110                return;
 111
 112        clk_disable_unprepare(host->tx_l1_sync_clk);
 113        clk_disable_unprepare(host->tx_l0_sync_clk);
 114        clk_disable_unprepare(host->rx_l1_sync_clk);
 115        clk_disable_unprepare(host->rx_l0_sync_clk);
 116
 117        host->is_lane_clks_enabled = false;
 118}
 119
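     /*
      * Enable the per-lane TX/RX symbol clocks, unwinding in reverse order
      * on failure. The lane1 clocks may be NULL in single-lane
      * configurations (see ufs_qcom_init_lane_clks()), which
      * clk_prepare_enable() accepts.
      */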
 120static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
 121{
 122        int err = 0;
 123        struct device *dev = host->hba->dev;
 124
 125        if (host->is_lane_clks_enabled)
 126                return 0;
 127
 128        err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
 129                host->rx_l0_sync_clk);
 130        if (err)
 131                goto out;
 132
 133        err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
 134                host->tx_l0_sync_clk);
 135        if (err)
 136                goto disable_rx_l0;
 137
 138        err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
 139                        host->rx_l1_sync_clk);
 140        if (err)
 141                goto disable_tx_l0;
 142
 143        err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
 144                        host->tx_l1_sync_clk);
 145        if (err)
 146                goto disable_rx_l1;
 147
 148        host->is_lane_clks_enabled = true;
 149        goto out;
 150
 151disable_rx_l1:
 152        clk_disable_unprepare(host->rx_l1_sync_clk);
 153disable_tx_l0:
 154        clk_disable_unprepare(host->tx_l0_sync_clk);
 155disable_rx_l0:
 156        clk_disable_unprepare(host->rx_l0_sync_clk);
 157out:
 158        return err;
 159}
 160
 161static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 162{
 163        int err = 0;
 164        struct device *dev = host->hba->dev;
 165
 166        if (has_acpi_companion(dev))
 167                return 0;
 168
 169        err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
 170                                        &host->rx_l0_sync_clk, false);
 171        if (err)
 172                goto out;
 173
 174        err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
 175                                        &host->tx_l0_sync_clk, false);
 176        if (err)
 177                goto out;
 178
  179        /* In case of a single lane per direction, don't get lane1 clocks */
 180        if (host->hba->lanes_per_direction > 1) {
 181                err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
 182                        &host->rx_l1_sync_clk, false);
 183                if (err)
 184                        goto out;
 185
 186                err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
 187                        &host->tx_l1_sync_clk, true);
 188        }
 189out:
 190        return err;
 191}
 192
 193static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
 194{
 195        u32 tx_lanes;
 196
 197        return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
 198}
 199
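     /*
      * Poll the M-PHY TX_FSM_STATE attribute until the TX state machine
      * reaches HIBERN8 or HBRN8_POLL_TOUT_MS expires. Returns 0 when the
      * FSM is in HIBERN8, a negative errno on DME access failure, or the
      * unexpected FSM value itself otherwise.
      */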
 200static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 201{
 202        int err;
 203        u32 tx_fsm_val = 0;
 204        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
 205
 206        do {
 207                err = ufshcd_dme_get(hba,
 208                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
 209                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 210                                &tx_fsm_val);
 211                if (err || tx_fsm_val == TX_FSM_HIBERN8)
 212                        break;
 213
 214                /* sleep for max. 200us */
 215                usleep_range(100, 200);
 216        } while (time_before(jiffies, timeout));
 217
 218        /*
  219         * We might have been scheduled out for a long time during
  220         * polling, so check the state again.
 221         */
 222        if (time_after(jiffies, timeout))
 223                err = ufshcd_dme_get(hba,
 224                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
 225                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
 226                                &tx_fsm_val);
 227
 228        if (err) {
 229                dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
 230                                __func__, err);
 231        } else if (tx_fsm_val != TX_FSM_HIBERN8) {
 232                err = tx_fsm_val;
 233                dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
 234                                __func__, err);
 235        }
 236
 237        return err;
 238}
 239
 240static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
 241{
 242        ufshcd_rmwl(host->hba, QUNIPRO_SEL,
 243                   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
 244                   REG_UFS_CFG1);
 245        /* make sure above configuration is applied before we return */
 246        mb();
 247}
 248
 249/*
 250 * ufs_qcom_host_reset - reset host controller and PHY
 251 */
 252static int ufs_qcom_host_reset(struct ufs_hba *hba)
 253{
 254        int ret = 0;
 255        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 256        bool reenable_intr = false;
 257
 258        if (!host->core_reset) {
 259                dev_warn(hba->dev, "%s: reset control not set\n", __func__);
 260                goto out;
 261        }
 262
 263        reenable_intr = hba->is_irq_enabled;
 264        disable_irq(hba->irq);
 265        hba->is_irq_enabled = false;
 266
 267        ret = reset_control_assert(host->core_reset);
 268        if (ret) {
 269                dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
 270                                 __func__, ret);
 271                goto out;
 272        }
 273
 274        /*
  275         * The hardware requires a delay between assert/deassert of at
  276         * least 3-4 sleep clock (32.768 kHz) cycles, which comes to
  277         * ~125us (4/32768). To be on the safe side, add a 200us delay.
 278         */
 279        usleep_range(200, 210);
 280
 281        ret = reset_control_deassert(host->core_reset);
 282        if (ret)
 283                dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
 284                                 __func__, ret);
 285
 286        usleep_range(1000, 1100);
 287
 288        if (reenable_intr) {
 289                enable_irq(hba->irq);
 290                hba->is_irq_enabled = true;
 291        }
 292
 293out:
 294        return ret;
 295}
 296
 297static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 298{
 299        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 300        struct phy *phy = host->generic_phy;
 301        int ret = 0;
  302        bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
 304
 305        /* Reset UFS Host Controller and PHY */
 306        ret = ufs_qcom_host_reset(hba);
 307        if (ret)
 308                dev_warn(hba->dev, "%s: host reset returned %d\n",
 309                                  __func__, ret);
 310
 311        if (is_rate_B)
 312                phy_set_mode(phy, PHY_MODE_UFS_HS_B);
 313
 314        /* phy initialization - calibrate the phy */
 315        ret = phy_init(phy);
 316        if (ret) {
 317                dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
 318                        __func__, ret);
 319                goto out;
 320        }
 321
 322        /* power on phy - start serdes and phy's power and clocks */
 323        ret = phy_power_on(phy);
 324        if (ret) {
 325                dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
 326                        __func__, ret);
 327                goto out_disable_phy;
 328        }
 329
 330        ufs_qcom_select_unipro_mode(host);
 331
 332        return 0;
 333
 334out_disable_phy:
 335        phy_exit(phy);
 336out:
 337        return ret;
 338}
 339
 340/*
 341 * The UTP controller has a number of internal clock gating cells (CGCs).
 342 * Internal hardware sub-modules within the UTP controller control the CGCs.
 343 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
  344 * in a specific operation. UTP controller CGCs are disabled by default and
  345 * this function enables them (after every UFS link startup) to save some
  346 * power leakage.
 347 */
 348static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
 349{
 350        ufshcd_writel(hba,
 351                ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
 352                REG_UFS_CFG2);
 353
 354        /* Ensure that HW clock gating is enabled before next operations */
 355        mb();
 356}
 357
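     /*
      * Host controller enable notification: on PRE_CHANGE, reset and power
      * up the PHY and then the lane symbol clocks; on POST_CHANGE (once the
      * controller is enabled), verify the PHY moved to HIBERN8, then enable
      * hardware clock gating and the ICE (Inline Crypto Engine).
      */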
 358static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
 359                                      enum ufs_notify_change_status status)
 360{
 361        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 362        int err = 0;
 363
 364        switch (status) {
 365        case PRE_CHANGE:
 366                ufs_qcom_power_up_sequence(hba);
 367                /*
 368                 * The PHY PLL output is the source of tx/rx lane symbol
 369                 * clocks, hence, enable the lane clocks only after PHY
 370                 * is initialized.
 371                 */
 372                err = ufs_qcom_enable_lane_clks(host);
 373                break;
 374        case POST_CHANGE:
 375                /* check if UFS PHY moved from DISABLED to HIBERN8 */
 376                err = ufs_qcom_check_hibern8(hba);
 377                ufs_qcom_enable_hw_clk_gating(hba);
 378                ufs_qcom_ice_enable(host);
 379                break;
 380        default:
 381                dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
 382                err = -EINVAL;
 383                break;
 384        }
 385        return err;
 386}
 387
 388/*
  389 * Returns zero for success and non-zero in case of failure.
 390 */
 391static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 392                               u32 hs, u32 rate, bool update_link_startup_timer)
 393{
 394        int ret = 0;
 395        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 396        struct ufs_clk_info *clki;
 397        u32 core_clk_period_in_ns;
 398        u32 tx_clk_cycles_per_us = 0;
 399        unsigned long core_clk_rate = 0;
 400        u32 core_clk_cycles_per_us = 0;
 401
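             /*
              * {gear, TX symbol-clock cycles per us} pairs used to program
              * REG_UFS_TX_SYMBOL_CLK_NS_US below: PWM gears, then HS gears
              * at rate A and rate B respectively.
              */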
 402        static u32 pwm_fr_table[][2] = {
 403                {UFS_PWM_G1, 0x1},
 404                {UFS_PWM_G2, 0x1},
 405                {UFS_PWM_G3, 0x1},
 406                {UFS_PWM_G4, 0x1},
 407        };
 408
 409        static u32 hs_fr_table_rA[][2] = {
 410                {UFS_HS_G1, 0x1F},
 411                {UFS_HS_G2, 0x3e},
 412                {UFS_HS_G3, 0x7D},
 413        };
 414
 415        static u32 hs_fr_table_rB[][2] = {
 416                {UFS_HS_G1, 0x24},
 417                {UFS_HS_G2, 0x49},
 418                {UFS_HS_G3, 0x92},
 419        };
 420
 421        /*
  422         * The Qunipro controller does not use the following registers:
  423         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
  424         * UFS_REG_PA_LINK_STARTUP_TIMER.
  425         * But the UTP controller uses the SYS1CLK_1US_REG register for
  426         * Interrupt Aggregation logic.
  427         */
 428        if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
 429                goto out;
 430
 431        if (gear == 0) {
 432                dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
 433                goto out_error;
 434        }
 435
 436        list_for_each_entry(clki, &hba->clk_list_head, list) {
 437                if (!strcmp(clki->name, "core_clk"))
 438                        core_clk_rate = clk_get_rate(clki->clk);
 439        }
 440
 441        /* If frequency is smaller than 1MHz, set to 1MHz */
 442        if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
 443                core_clk_rate = DEFAULT_CLK_RATE_HZ;
 444
 445        core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
 446        if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
 447                ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
 448                /*
 449                 * make sure above write gets applied before we return from
 450                 * this function.
 451                 */
 452                mb();
 453        }
 454
 455        if (ufs_qcom_cap_qunipro(host))
 456                goto out;
 457
 458        core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
 459        core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
 460        core_clk_period_in_ns &= MASK_CLK_NS_REG;
 461
 462        switch (hs) {
 463        case FASTAUTO_MODE:
 464        case FAST_MODE:
 465                if (rate == PA_HS_MODE_A) {
 466                        if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
 467                                dev_err(hba->dev,
 468                                        "%s: index %d exceeds table size %zu\n",
 469                                        __func__, gear,
 470                                        ARRAY_SIZE(hs_fr_table_rA));
 471                                goto out_error;
 472                        }
 473                        tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
 474                } else if (rate == PA_HS_MODE_B) {
 475                        if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
 476                                dev_err(hba->dev,
 477                                        "%s: index %d exceeds table size %zu\n",
 478                                        __func__, gear,
 479                                        ARRAY_SIZE(hs_fr_table_rB));
 480                                goto out_error;
 481                        }
 482                        tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
 483                } else {
 484                        dev_err(hba->dev, "%s: invalid rate = %d\n",
 485                                __func__, rate);
 486                        goto out_error;
 487                }
 488                break;
 489        case SLOWAUTO_MODE:
 490        case SLOW_MODE:
 491                if (gear > ARRAY_SIZE(pwm_fr_table)) {
 492                        dev_err(hba->dev,
 493                                        "%s: index %d exceeds table size %zu\n",
 494                                        __func__, gear,
 495                                        ARRAY_SIZE(pwm_fr_table));
 496                        goto out_error;
 497                }
 498                tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
 499                break;
 500        case UNCHANGED:
 501        default:
 502                dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
 503                goto out_error;
 504        }
 505
 506        if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
 507            (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
  508                /* this register's 2 fields shall be written at once */
 509                ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
 510                              REG_UFS_TX_SYMBOL_CLK_NS_US);
 511                /*
 512                 * make sure above write gets applied before we return from
 513                 * this function.
 514                 */
 515                mb();
 516        }
 517
 518        if (update_link_startup_timer) {
 519                ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
 520                              REG_UFS_PA_LINK_STARTUP_TIMER);
 521                /*
 522                 * make sure that this configuration is applied before
 523                 * we return
 524                 */
 525                mb();
 526        }
 527        goto out;
 528
 529out_error:
 530        ret = -EINVAL;
 531out:
 532        return ret;
 533}
 534
 535static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 536                                        enum ufs_notify_change_status status)
 537{
 538        int err = 0;
 539        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 540
 541        switch (status) {
 542        case PRE_CHANGE:
 543                if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
 544                                        0, true)) {
 545                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 546                                __func__);
 547                        err = -EINVAL;
 548                        goto out;
 549                }
 550
 551                if (ufs_qcom_cap_qunipro(host))
 552                        /*
 553                         * set unipro core clock cycles to 150 & clear clock
 554                         * divider
 555                         */
 556                        err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
 557                                                                          150);
 558
 559                /*
  560                 * Some UFS devices (and possibly the host) have issues if
  561                 * LCC is enabled. So we are setting PA_Local_TX_LCC_Enable
  562                 * to 0 before link startup, which makes sure that both the
  563                 * host and the device TX LCC are disabled once link startup
  564                 * is completed.
 565                 */
 566                if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
 567                        err = ufshcd_disable_host_tx_lcc(hba);
 568
 569                break;
 570        case POST_CHANGE:
 571                ufs_qcom_link_startup_post_change(hba);
 572                break;
 573        default:
 574                break;
 575        }
 576
 577out:
 578        return err;
 579}
 580
 581static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
 582{
 583        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 584
 585        /* reset gpio is optional */
 586        if (!host->device_reset)
 587                return;
 588
 589        gpiod_set_value_cansleep(host->device_reset, asserted);
 590}
 591
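     /*
      * What gets powered down on suspend depends on the link state: with
      * the link off, the lane clocks and the PHY are turned off and the
      * device is held in reset; if the link is merely inactive (hibern8),
      * only the lane clocks are disabled so that resume stays cheap.
      */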
 592static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 593{
 594        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 595        struct phy *phy = host->generic_phy;
 596
 597        if (ufs_qcom_is_link_off(hba)) {
 598                /*
 599                 * Disable the tx/rx lane symbol clocks before PHY is
 600                 * powered down as the PLL source should be disabled
 601                 * after downstream clocks are disabled.
 602                 */
 603                ufs_qcom_disable_lane_clks(host);
 604                phy_power_off(phy);
 605
 606                /* reset the connected UFS device during power down */
 607                ufs_qcom_device_reset_ctrl(hba, true);
 608
 609        } else if (!ufs_qcom_is_link_active(hba)) {
 610                ufs_qcom_disable_lane_clks(host);
 611        }
 612
 613        return 0;
 614}
 615
 616static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 617{
 618        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 619        struct phy *phy = host->generic_phy;
 620        int err;
 621
 622        if (ufs_qcom_is_link_off(hba)) {
 623                err = phy_power_on(phy);
 624                if (err) {
 625                        dev_err(hba->dev, "%s: failed PHY power on: %d\n",
 626                                __func__, err);
 627                        return err;
 628                }
 629
 630                err = ufs_qcom_enable_lane_clks(host);
 631                if (err)
 632                        return err;
 633
 634        } else if (!ufs_qcom_is_link_active(hba)) {
 635                err = ufs_qcom_enable_lane_clks(host);
 636                if (err)
 637                        return err;
 638        }
 639
 640        err = ufs_qcom_ice_resume(host);
 641        if (err)
 642                return err;
 643
 644        hba->is_sys_suspended = false;
 645        return 0;
 646}
 647
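     /*
      * Gate/ungate the reference clock supplied to the UFS device. The
      * enable bit is either in REG_UFS_CFG1 (controller v2 and later) or in
      * the separate "dev_ref_clk_ctrl_mem" region on older parts; both are
      * reached through host->dev_ref_clk_ctrl_mmio and
      * host->dev_ref_clk_en_mask set up in ufs_qcom_init().
      */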
 648static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
 649{
 650        if (host->dev_ref_clk_ctrl_mmio &&
 651            (enable ^ host->is_dev_ref_clk_enabled)) {
 652                u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
 653
 654                if (enable)
 655                        temp |= host->dev_ref_clk_en_mask;
 656                else
 657                        temp &= ~host->dev_ref_clk_en_mask;
 658
 659                /*
  660                 * If we are here to disable this clock, it might be immediately
  661                 * after entering hibern8, in which case we need to make
  662                 * sure that the device ref_clk is active for a specific time
  663                 * after hibern8 entry.
 664                 */
 665                if (!enable) {
 666                        unsigned long gating_wait;
 667
 668                        gating_wait = host->hba->dev_info.clk_gating_wait_us;
 669                        if (!gating_wait) {
 670                                udelay(1);
 671                        } else {
 672                                /*
 673                                 * bRefClkGatingWaitTime defines the minimum
 674                                 * time for which the reference clock is
  675                                 * required by the device during the transition
  676                                 * from HS-MODE to LS-MODE or the HIBERN8 state.
  677                                 * Add some margin to be on the safe side.
 678                                 */
 679                                gating_wait += 10;
 680                                usleep_range(gating_wait, gating_wait + 10);
 681                        }
 682                }
 683
 684                writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
 685
 686                /* ensure that ref_clk is enabled/disabled before we return */
 687                wmb();
 688
 689                /*
 690                 * If we call hibern8 exit after this, we need to make sure that
  691                 * the device ref_clk is stable for at least 1us before the hibern8
 692                 * exit command.
 693                 */
 694                if (enable)
 695                        udelay(1);
 696
 697                host->is_dev_ref_clk_enabled = enable;
 698        }
 699}
 700
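     /*
      * Power mode change notification: on PRE_CHANGE, negotiate the new
      * power mode within this controller's capabilities (capped at HS-G2 on
      * v1 hardware) and enable the device ref_clk before entering HS mode;
      * on POST_CHANGE, reprogram the controller timers for the agreed mode
      * and cache the parameters for later use.
      */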
 701static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 702                                enum ufs_notify_change_status status,
 703                                struct ufs_pa_layer_attr *dev_max_params,
 704                                struct ufs_pa_layer_attr *dev_req_params)
 705{
 706        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 707        struct ufs_dev_params ufs_qcom_cap;
 708        int ret = 0;
 709
 710        if (!dev_req_params) {
 711                pr_err("%s: incoming dev_req_params is NULL\n", __func__);
 712                ret = -EINVAL;
 713                goto out;
 714        }
 715
 716        switch (status) {
 717        case PRE_CHANGE:
 718                ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
 719                ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
 720
 721                if (host->hw_ver.major == 0x1) {
 722                        /*
 723                         * HS-G3 operations may not reliably work on legacy QCOM
  724                         * UFS host controller hardware even though the capability
  725                         * exchange during the link startup phase may end up
  726                         * negotiating the maximum supported gear as G3.
 727                         * Hence downgrade the maximum supported gear to HS-G2.
 728                         */
 729                        if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
 730                                ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
 731                        if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
 732                                ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
 733                }
 734
 735                ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
 736                                               dev_max_params,
 737                                               dev_req_params);
 738                if (ret) {
 739                        pr_err("%s: failed to determine capabilities\n",
 740                                        __func__);
 741                        goto out;
 742                }
 743
 744                /* enable the device ref clock before changing to HS mode */
 745                if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
 746                        ufshcd_is_hs_mode(dev_req_params))
 747                        ufs_qcom_dev_ref_clk_ctrl(host, true);
 748
 749                if (host->hw_ver.major >= 0x4) {
 750                        ufshcd_dme_configure_adapt(hba,
 751                                                dev_req_params->gear_tx,
 752                                                PA_INITIAL_ADAPT);
 753                }
 754                break;
 755        case POST_CHANGE:
 756                if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
 757                                        dev_req_params->pwr_rx,
 758                                        dev_req_params->hs_rate, false)) {
 759                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 760                                __func__);
 761                        /*
  762                         * We return the error code at the end of the routine,
  763                         * but continue to configure UFS_PHY_TX_LANE_ENABLE
  764                         * and bus voting as usual.
 765                         */
 766                        ret = -EINVAL;
 767                }
 768
 769                /* cache the power mode parameters to use internally */
 770                memcpy(&host->dev_req_params,
 771                                dev_req_params, sizeof(*dev_req_params));
 772
 773                /* disable the device ref clock if entered PWM mode */
 774                if (ufshcd_is_hs_mode(&hba->pwr_info) &&
 775                        !ufshcd_is_hs_mode(dev_req_params))
 776                        ufs_qcom_dev_ref_clk_ctrl(host, false);
 777                break;
 778        default:
 779                ret = -EINVAL;
 780                break;
 781        }
 782out:
 783        return ret;
 784}
 785
 786static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
 787{
 788        int err;
 789        u32 pa_vs_config_reg1;
 790
 791        err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
 792                             &pa_vs_config_reg1);
 793        if (err)
 794                goto out;
 795
 796        /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
 797        err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
 798                            (pa_vs_config_reg1 | (1 << 12)));
 799
 800out:
 801        return err;
 802}
 803
 804static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 805{
 806        int err = 0;
 807
 808        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
 809                err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
 810
 811        if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
 812                hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
 813
 814        return err;
 815}
 816
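     /*
      * The UFSHCI version register cannot be trusted on all controller
      * revisions (see UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION below), so derive
      * it from the hardware version instead: v1 controllers implement
      * UFSHCI 1.1, later ones are treated as UFSHCI 2.0.
      */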
 817static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 818{
 819        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 820
 821        if (host->hw_ver.major == 0x1)
 822                return UFSHCI_VERSION_11;
 823        else
 824                return UFSHCI_VERSION_20;
 825}
 826
 827/**
 828 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 829 * @hba: host controller instance
 830 *
  831 * The QCOM UFS host controller might have some non-standard behaviours
  832 * (quirks) compared to what the UFSHCI specification expects. Advertise
  833 * all such quirks to the standard UFS host controller driver so that it
  834 * takes them into account.
 835 */
 836static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
 837{
 838        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 839
 840        if (host->hw_ver.major == 0x01) {
 841                hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
 842                            | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
 843                            | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
 844
 845                if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
 846                        hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
 847
 848                hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
 849        }
 850
 851        if (host->hw_ver.major == 0x2) {
 852                hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 853
 854                if (!ufs_qcom_cap_qunipro(host))
  855                        /* Legacy UniPro mode still needs the following quirks */
 856                        hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
 857                                | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
 858                                | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
 859        }
 860}
 861
 862static void ufs_qcom_set_caps(struct ufs_hba *hba)
 863{
 864        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 865
 866        hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 867        hba->caps |= UFSHCD_CAP_CLK_SCALING;
 868        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 869        hba->caps |= UFSHCD_CAP_WB_EN;
 870        hba->caps |= UFSHCD_CAP_CRYPTO;
 871        hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
 872
 873        if (host->hw_ver.major >= 0x2) {
 874                host->caps = UFS_QCOM_CAP_QUNIPRO |
 875                             UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
 876        }
 877}
 878
 879/**
  880 * ufs_qcom_setup_clocks - enables/disables clocks
 881 * @hba: host controller instance
 882 * @on: If true, enable clocks else disable them.
 883 * @status: PRE_CHANGE or POST_CHANGE notify
 884 *
 885 * Returns 0 on success, non-zero on failure.
 886 */
 887static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
 888                                 enum ufs_notify_change_status status)
 889{
 890        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 891        int err = 0;
 892
 893        /*
  894         * In case ufs_qcom_init() is not done yet, simply ignore.
  895         * ufs_qcom_setup_clocks() will be called again from
  896         * ufs_qcom_init() once initialization is done.
 897         */
 898        if (!host)
 899                return 0;
 900
 901        switch (status) {
 902        case PRE_CHANGE:
 903                if (!on) {
 904                        if (!ufs_qcom_is_link_active(hba)) {
 905                                /* disable device ref_clk */
 906                                ufs_qcom_dev_ref_clk_ctrl(host, false);
 907                        }
 908                }
 909                break;
 910        case POST_CHANGE:
 911                if (on) {
  912                        /* enable the device ref clock for HS mode */
 913                        if (ufshcd_is_hs_mode(&hba->pwr_info))
 914                                ufs_qcom_dev_ref_clk_ctrl(host, true);
 915                }
 916                break;
 917        }
 918
 919        return err;
 920}
 921
 922static int
 923ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
 924{
 925        struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
 926
 927        /* Currently this code only knows about a single reset. */
 928        WARN_ON(id);
 929        ufs_qcom_assert_reset(host->hba);
 930        /* provide 1ms delay to let the reset pulse propagate. */
 931        usleep_range(1000, 1100);
 932        return 0;
 933}
 934
 935static int
 936ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
 937{
 938        struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
 939
 940        /* Currently this code only knows about a single reset. */
 941        WARN_ON(id);
 942        ufs_qcom_deassert_reset(host->hba);
 943
 944        /*
  945         * After reset deassertion, the PHY will need all ref clocks,
  946         * voltage and current to settle down before starting serdes.
 947         */
 948        usleep_range(1000, 1100);
 949        return 0;
 950}
 951
 952static const struct reset_control_ops ufs_qcom_reset_ops = {
 953        .assert = ufs_qcom_reset_assert,
 954        .deassert = ufs_qcom_reset_deassert,
 955};
 956
 957#define ANDROID_BOOT_DEV_MAX    30
 958static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
 959
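     /*
      * On Android systems the boot loader passes
      * "androidboot.bootdevice=<device>" on the kernel command line; record
      * it so that ufs_qcom_init() can reject controllers that are not the
      * boot device.
      */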
 960#ifndef MODULE
 961static int __init get_android_boot_dev(char *str)
 962{
 963        strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
 964        return 1;
 965}
 966__setup("androidboot.bootdevice=", get_android_boot_dev);
 967#endif
 968
 969/**
 970 * ufs_qcom_init - bind phy with controller
 971 * @hba: host controller instance
 972 *
  973 * Binds the PHY with the controller and powers up the PHY, enabling
  974 * its clocks and regulators.
  975 *
  976 * Returns -EPROBE_DEFER if binding fails, a negative error code on
  977 * PHY power up failure and zero on success.
 978 */
 979static int ufs_qcom_init(struct ufs_hba *hba)
 980{
 981        int err;
 982        struct device *dev = hba->dev;
 983        struct platform_device *pdev = to_platform_device(dev);
 984        struct ufs_qcom_host *host;
 985        struct resource *res;
 986        struct ufs_clk_info *clki;
 987
 988        if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
 989                return -ENODEV;
 990
 991        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
 992        if (!host) {
 993                err = -ENOMEM;
 994                dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
 995                goto out;
 996        }
 997
  998        /* Make a two-way bind between the qcom host and the hba */
 999        host->hba = hba;
1000        ufshcd_set_variant(hba, host);
1001
1002        /* Setup the reset control of HCI */
1003        host->core_reset = devm_reset_control_get(hba->dev, "rst");
1004        if (IS_ERR(host->core_reset)) {
1005                err = PTR_ERR(host->core_reset);
1006                dev_warn(dev, "Failed to get reset control %d\n", err);
1007                host->core_reset = NULL;
1008                err = 0;
1009        }
1010
1011        /* Fire up the reset controller. Failure here is non-fatal. */
1012        host->rcdev.of_node = dev->of_node;
1013        host->rcdev.ops = &ufs_qcom_reset_ops;
1014        host->rcdev.owner = dev->driver->owner;
1015        host->rcdev.nr_resets = 1;
1016        err = devm_reset_controller_register(dev, &host->rcdev);
1017        if (err) {
1018                dev_warn(dev, "Failed to register reset controller\n");
1019                err = 0;
1020        }
1021
1022        /*
 1023         * Voting/devoting the device ref_clk source is time consuming,
 1024         * hence skip devoting it during aggressive clock gating. This clock
1025         * will still be gated off during runtime suspend.
1026         */
1027        host->generic_phy = devm_phy_get(dev, "ufsphy");
1028
1029        if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
1030                /*
 1031                 * The UFS driver might be probed before the phy driver is.
 1032                 * In that case we would like to return -EPROBE_DEFER.
1033                 */
1034                err = -EPROBE_DEFER;
 1035                dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
1036                        __func__, err);
1037                goto out_variant_clear;
1038        } else if (IS_ERR(host->generic_phy)) {
1039                if (has_acpi_companion(dev)) {
1040                        host->generic_phy = NULL;
1041                } else {
1042                        err = PTR_ERR(host->generic_phy);
1043                        dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
1044                        goto out_variant_clear;
1045                }
1046        }
1047
1048        host->device_reset = devm_gpiod_get_optional(dev, "reset",
1049                                                     GPIOD_OUT_HIGH);
1050        if (IS_ERR(host->device_reset)) {
1051                err = PTR_ERR(host->device_reset);
1052                if (err != -EPROBE_DEFER)
1053                        dev_err(dev, "failed to acquire reset gpio: %d\n", err);
1054                goto out_variant_clear;
1055        }
1056
1057        ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1058                &host->hw_ver.minor, &host->hw_ver.step);
1059
1060        /*
 1061         * For newer controllers, the device reference clock control bit has
 1062         * moved inside the UFS controller register address space itself.
1063         */
1064        if (host->hw_ver.major >= 0x02) {
1065                host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1066                host->dev_ref_clk_en_mask = BIT(26);
1067        } else {
 1068                /* "dev_ref_clk_ctrl_mem" is an optional resource */
1069                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1070                                                   "dev_ref_clk_ctrl_mem");
1071                if (res) {
1072                        host->dev_ref_clk_ctrl_mmio =
1073                                        devm_ioremap_resource(dev, res);
1074                        if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
1075                                dev_warn(dev,
1076                                        "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
1077                                        __func__,
1078                                        PTR_ERR(host->dev_ref_clk_ctrl_mmio));
1079                                host->dev_ref_clk_ctrl_mmio = NULL;
1080                        }
1081                        host->dev_ref_clk_en_mask = BIT(5);
1082                }
1083        }
1084
1085        list_for_each_entry(clki, &hba->clk_list_head, list) {
1086                if (!strcmp(clki->name, "core_clk_unipro"))
1087                        clki->keep_link_active = true;
1088        }
1089
1090        err = ufs_qcom_init_lane_clks(host);
1091        if (err)
1092                goto out_variant_clear;
1093
1094        ufs_qcom_set_caps(hba);
1095        ufs_qcom_advertise_quirks(hba);
1096
1097        err = ufs_qcom_ice_init(host);
1098        if (err)
1099                goto out_variant_clear;
1100
1101        ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
1102
1103        if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1104                ufs_qcom_hosts[hba->dev->id] = host;
1105
1106        host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
1107        ufs_qcom_get_default_testbus_cfg(host);
1108        err = ufs_qcom_testbus_config(host);
1109        if (err) {
1110                dev_warn(dev, "%s: failed to configure the testbus %d\n",
1111                                __func__, err);
1112                err = 0;
1113        }
1114
1115        goto out;
1116
1117out_variant_clear:
1118        ufshcd_set_variant(hba, NULL);
1119out:
1120        return err;
1121}
1122
1123static void ufs_qcom_exit(struct ufs_hba *hba)
1124{
1125        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1126
1127        ufs_qcom_disable_lane_clks(host);
1128        phy_power_off(host->generic_phy);
1129        phy_exit(host->generic_phy);
1130}
1131
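     /*
      * Program the vendor-specific DME_VS_CORE_CLK_CTRL attribute: set the
      * "core clock cycles per 1us" field to @clk_cycles and clear the
      * CORE_CLK_DIV_EN bit. Used with 150 cycles when scaling up and 75
      * cycles when scaling down the UniPro core clock.
      */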
1132static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
1133                                                       u32 clk_cycles)
1134{
1135        int err;
1136        u32 core_clk_ctrl_reg;
1137
1138        if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
1139                return -EINVAL;
1140
1141        err = ufshcd_dme_get(hba,
1142                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1143                            &core_clk_ctrl_reg);
1144        if (err)
1145                goto out;
1146
1147        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
1148        core_clk_ctrl_reg |= clk_cycles;
1149
1150        /* Clear CORE_CLK_DIV_EN */
1151        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1152
1153        err = ufshcd_dme_set(hba,
1154                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1155                            core_clk_ctrl_reg);
1156out:
1157        return err;
1158}
1159
1160static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
1161{
1162        /* nothing to do as of now */
1163        return 0;
1164}
1165
1166static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1167{
1168        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1169
1170        if (!ufs_qcom_cap_qunipro(host))
1171                return 0;
1172
1173        /* set unipro core clock cycles to 150 and clear clock divider */
1174        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
1175}
1176
1177static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1178{
1179        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1180        int err;
1181        u32 core_clk_ctrl_reg;
1182
1183        if (!ufs_qcom_cap_qunipro(host))
1184                return 0;
1185
1186        err = ufshcd_dme_get(hba,
1187                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1188                            &core_clk_ctrl_reg);
1189
1190        /* make sure CORE_CLK_DIV_EN is cleared */
1191        if (!err &&
1192            (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1193                core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1194                err = ufshcd_dme_set(hba,
1195                                    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1196                                    core_clk_ctrl_reg);
1197        }
1198
1199        return err;
1200}
1201
1202static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
1203{
1204        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1205
1206        if (!ufs_qcom_cap_qunipro(host))
1207                return 0;
1208
1209        /* set unipro core clock cycles to 75 and clear clock divider */
1210        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
1211}
1212
1213static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
1214                bool scale_up, enum ufs_notify_change_status status)
1215{
1216        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1217        struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
1218        int err = 0;
1219
1220        if (status == PRE_CHANGE) {
1221                if (scale_up)
1222                        err = ufs_qcom_clk_scale_up_pre_change(hba);
1223                else
1224                        err = ufs_qcom_clk_scale_down_pre_change(hba);
1225        } else {
1226                if (scale_up)
1227                        err = ufs_qcom_clk_scale_up_post_change(hba);
1228                else
1229                        err = ufs_qcom_clk_scale_down_post_change(hba);
1230
1231                if (err || !dev_req_params)
1232                        goto out;
1233
1234                ufs_qcom_cfg_timers(hba,
1235                                    dev_req_params->gear_rx,
1236                                    dev_req_params->pwr_rx,
1237                                    dev_req_params->hs_rate,
1238                                    false);
1239        }
1240
1241out:
1242        return err;
1243}
1244
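     /*
      * Dump all vendor-specific debug register windows through @print_fn.
      * The UTP RAM regions (EDTL/DESC/PRDT) are read with UTP_DBG_RAMS_EN
      * set in REG_UFS_CFG1, which is cleared again once they are dumped.
      */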
1245static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1246                void *priv, void (*print_fn)(struct ufs_hba *hba,
1247                int offset, int num_regs, const char *str, void *priv))
1248{
1249        u32 reg;
1250        struct ufs_qcom_host *host;
1251
1252        if (unlikely(!hba)) {
1253                pr_err("%s: hba is NULL\n", __func__);
1254                return;
1255        }
1256        if (unlikely(!print_fn)) {
1257                dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
1258                return;
1259        }
1260
1261        host = ufshcd_get_variant(hba);
1262        if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
1263                return;
1264
1265        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1266        print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
1267
1268        reg = ufshcd_readl(hba, REG_UFS_CFG1);
1269        reg |= UTP_DBG_RAMS_EN;
1270        ufshcd_writel(hba, reg, REG_UFS_CFG1);
1271
1272        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1273        print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
1274
1275        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1276        print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
1277
1278        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1279        print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
1280
1281        /* clear bit 17 - UTP_DBG_RAMS_EN */
1282        ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1283
1284        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1285        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
1286
1287        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1288        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
1289
1290        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1291        print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
1292
1293        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1294        print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
1295
1296        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1297        print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
1298
1299        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1300        print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
1301
1302        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1303        print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
1304}
1305
1306static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1307{
1308        if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
1309                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1310                                UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1311                ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1312        } else {
1313                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
1314                ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
1315        }
1316}
1317
1318static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1319{
1320        /* provide a legal default configuration */
1321        host->testbus.select_major = TSTBUS_UNIPRO;
1322        host->testbus.select_minor = 37;
1323}
1324
1325static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1326{
1327        if (host->testbus.select_major >= TSTBUS_MAX) {
1328                dev_err(host->hba->dev,
 1329                        "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
1330                        __func__, host->testbus.select_major);
1331                return false;
1332        }
1333
1334        return true;
1335}
1336
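     /*
      * Route the selected test bus signals to the debug interface:
      * testbus.select_major (validated against TSTBUS_MAX above) picks the
      * sub-module and determines which UFS_TEST_BUS_CTRL_n register and bit
      * offset hold its select field, while testbus.select_minor picks the
      * signal group within that sub-module.
      */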
1337int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1338{
1339        int reg;
1340        int offset;
1341        u32 mask = TEST_BUS_SUB_SEL_MASK;
1342
1343        if (!host)
1344                return -EINVAL;
1345
1346        if (!ufs_qcom_testbus_cfg_is_ok(host))
1347                return -EPERM;
1348
1349        switch (host->testbus.select_major) {
1350        case TSTBUS_UAWM:
1351                reg = UFS_TEST_BUS_CTRL_0;
1352                offset = 24;
1353                break;
1354        case TSTBUS_UARM:
1355                reg = UFS_TEST_BUS_CTRL_0;
1356                offset = 16;
1357                break;
1358        case TSTBUS_TXUC:
1359                reg = UFS_TEST_BUS_CTRL_0;
1360                offset = 8;
1361                break;
1362        case TSTBUS_RXUC:
1363                reg = UFS_TEST_BUS_CTRL_0;
1364                offset = 0;
1365                break;
1366        case TSTBUS_DFC:
1367                reg = UFS_TEST_BUS_CTRL_1;
1368                offset = 24;
1369                break;
1370        case TSTBUS_TRLUT:
1371                reg = UFS_TEST_BUS_CTRL_1;
1372                offset = 16;
1373                break;
1374        case TSTBUS_TMRLUT:
1375                reg = UFS_TEST_BUS_CTRL_1;
1376                offset = 8;
1377                break;
1378        case TSTBUS_OCSC:
1379                reg = UFS_TEST_BUS_CTRL_1;
1380                offset = 0;
1381                break;
1382        case TSTBUS_WRAPPER:
1383                reg = UFS_TEST_BUS_CTRL_2;
1384                offset = 16;
1385                break;
1386        case TSTBUS_COMBINED:
1387                reg = UFS_TEST_BUS_CTRL_2;
1388                offset = 8;
1389                break;
1390        case TSTBUS_UTP_HCI:
1391                reg = UFS_TEST_BUS_CTRL_2;
1392                offset = 0;
1393                break;
1394        case TSTBUS_UNIPRO:
1395                reg = UFS_UNIPRO_CFG;
1396                offset = 20;
1397                mask = 0xFFF;
1398                break;
1399        /*
1400         * No need for a default case, since
1401         * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1402         * is legal
1403         */
1404        }
1405        mask <<= offset;
1406        ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1407                    (u32)host->testbus.select_major << 19,
1408                    REG_UFS_CFG1);
1409        ufshcd_rmwl(host->hba, mask,
1410                    (u32)host->testbus.select_minor << offset,
1411                    reg);
1412        ufs_qcom_enable_test_bus(host);
1413        /*
1414         * Make sure the test bus configuration is
1415         * committed before returning.
1416         */
1417        mb();
1418
1419        return 0;
1420}
1421
1422static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1423{
1424        ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
1425                         "HCI Vendor Specific Registers ");
1426
1427        ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
1428}
1429
1430/**
1431 * ufs_qcom_device_reset() - toggle the (optional) device reset line
1432 * @hba: per-adapter instance
1433 *
1434 * Toggles the (optional) reset line to reset the attached device.
1435 */
1436static int ufs_qcom_device_reset(struct ufs_hba *hba)
1437{
1438        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1439
1440        /* reset gpio is optional */
1441        if (!host->device_reset)
1442                return -EOPNOTSUPP;
1443
1444        /*
 1445         * The UFS device shall detect reset pulses of 1us; sleep for 10us
 1446         * to be on the safe side.
1447         */
1448        ufs_qcom_device_reset_ctrl(hba, true);
1449        usleep_range(10, 15);
1450
1451        ufs_qcom_device_reset_ctrl(hba, false);
1452        usleep_range(10, 15);
1453
1454        return 0;
1455}
1456
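     /*
      * Tune the devfreq simple_ondemand governor used for UFS clock
      * scaling: poll every 60ms, scale up when the load exceeds 70% and
      * scale back down once it drops below upthreshold minus
      * downdifferential (65%).
      */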
1457#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
1458static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1459                                          struct devfreq_dev_profile *p,
1460                                          void *data)
1461{
 1462        struct devfreq_simple_ondemand_data *d;
1463
1464        if (!data)
1465                return;
1466
1467        d = (struct devfreq_simple_ondemand_data *)data;
1468        p->polling_ms = 60;
1469        d->upthreshold = 70;
1470        d->downdifferential = 5;
1471}
1472#else
1473static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1474                                          struct devfreq_dev_profile *p,
1475                                          void *data)
1476{
1477}
1478#endif
1479
1480/*
1481 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1482 *
1483 * The variant operations configure the necessary controller and PHY
1484 * handshake during initialization.
1485 */
1486static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1487        .name                   = "qcom",
1488        .init                   = ufs_qcom_init,
1489        .exit                   = ufs_qcom_exit,
1490        .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
1491        .clk_scale_notify       = ufs_qcom_clk_scale_notify,
1492        .setup_clocks           = ufs_qcom_setup_clocks,
1493        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
1494        .link_startup_notify    = ufs_qcom_link_startup_notify,
1495        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
1496        .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
1497        .suspend                = ufs_qcom_suspend,
1498        .resume                 = ufs_qcom_resume,
1499        .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
1500        .device_reset           = ufs_qcom_device_reset,
1501        .config_scaling_param = ufs_qcom_config_scaling_param,
1502        .program_key            = ufs_qcom_ice_program_key,
1503};
1504
1505/**
1506 * ufs_qcom_probe - probe routine of the driver
 1507 * @pdev: pointer to platform device handle
1508 *
1509 * Return zero for success and non-zero for failure
1510 */
1511static int ufs_qcom_probe(struct platform_device *pdev)
1512{
1513        int err;
1514        struct device *dev = &pdev->dev;
1515
1516        /* Perform generic probe */
1517        err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1518        if (err)
1519                dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
1520
1521        return err;
1522}
1523
1524/**
1525 * ufs_qcom_remove - set driver_data of the device to NULL
1526 * @pdev: pointer to platform device handle
1527 *
1528 * Always returns 0
1529 */
1530static int ufs_qcom_remove(struct platform_device *pdev)
1531{
1532        struct ufs_hba *hba =  platform_get_drvdata(pdev);
1533
 1534        pm_runtime_get_sync(&pdev->dev);
1535        ufshcd_remove(hba);
1536        return 0;
1537}
1538
1539static const struct of_device_id ufs_qcom_of_match[] = {
1540        { .compatible = "qcom,ufshc"},
1541        {},
1542};
1543MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1544
1545#ifdef CONFIG_ACPI
1546static const struct acpi_device_id ufs_qcom_acpi_match[] = {
1547        { "QCOM24A5" },
1548        { },
1549};
1550MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
1551#endif
1552
1553static const struct dev_pm_ops ufs_qcom_pm_ops = {
1554        .suspend        = ufshcd_pltfrm_suspend,
1555        .resume         = ufshcd_pltfrm_resume,
1556        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
1557        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
1558        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
1559};
1560
1561static struct platform_driver ufs_qcom_pltform = {
1562        .probe  = ufs_qcom_probe,
1563        .remove = ufs_qcom_remove,
1564        .shutdown = ufshcd_pltfrm_shutdown,
1565        .driver = {
1566                .name   = "ufshcd-qcom",
1567                .pm     = &ufs_qcom_pm_ops,
1568                .of_match_table = of_match_ptr(ufs_qcom_of_match),
1569                .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
1570        },
1571};
1572module_platform_driver(ufs_qcom_pltform);
1573
1574MODULE_LICENSE("GPL v2");
1575