linux/drivers/scsi/ufs/ufs-qcom.c
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN   \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

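/*
 * Major selectors for the UFS debug test bus. The chosen value is
 * programmed into the TEST_BUS_SEL field of UFS_CFG1 by
 * ufs_qcom_testbus_config(); anything >= TSTBUS_MAX is rejected.
 */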
enum {
        TSTBUS_UAWM,
        TSTBUS_UARM,
        TSTBUS_TXUC,
        TSTBUS_RXUC,
        TSTBUS_DFC,
        TSTBUS_TRLUT,
        TSTBUS_TMRLUT,
        TSTBUS_OCSC,
        TSTBUS_UTP_HCI,
        TSTBUS_COMBINED,
        TSTBUS_WRAPPER,
        TSTBUS_UNIPRO,
        TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
                char *prefix)
{
        print_hex_dump(KERN_ERR, prefix,
                        len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
                        16, 4, (void __force *)hba->mmio_base + offset,
                        len * 4, false);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
                char *prefix, void *priv)
{
        ufs_qcom_dump_regs(hba, offset, len, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
        int err = 0;

        err = ufshcd_dme_get(hba,
                        UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
                                __func__, err);

        return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
                const char *name, struct clk **clk_out)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk)) {
                err = PTR_ERR(clk);
                dev_err(dev, "%s: failed to get %s err %d\n",
                                __func__, name, err);
        } else {
                *clk_out = clk;
        }

        return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
                const char *name, struct clk *clk)
{
        int err = 0;

        err = clk_prepare_enable(clk);
        if (err)
                dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

        return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
        if (!host->is_lane_clks_enabled)
                return;

        if (host->hba->lanes_per_direction > 1)
                clk_disable_unprepare(host->tx_l1_sync_clk);
        clk_disable_unprepare(host->tx_l0_sync_clk);
        if (host->hba->lanes_per_direction > 1)
                clk_disable_unprepare(host->rx_l1_sync_clk);
        clk_disable_unprepare(host->rx_l0_sync_clk);

        host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        if (host->is_lane_clks_enabled)
                return 0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
                host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
                host->tx_l0_sync_clk);
        if (err)
                goto disable_rx_l0;

        if (host->hba->lanes_per_direction > 1) {
                err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
                        host->rx_l1_sync_clk);
                if (err)
                        goto disable_tx_l0;

                err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
                        host->tx_l1_sync_clk);
                if (err)
                        goto disable_rx_l1;
        }

        host->is_lane_clks_enabled = true;
        goto out;

disable_rx_l1:
        if (host->hba->lanes_per_direction > 1)
                clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
        clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
        clk_disable_unprepare(host->rx_l0_sync_clk);
out:
        return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        err = ufs_qcom_host_clk_get(dev,
                        "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_get(dev,
                        "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
        if (err)
                goto out;

        /* In case of single lane per direction, don't read lane1 clocks */
        if (host->hba->lanes_per_direction > 1) {
                err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
                        &host->rx_l1_sync_clk);
                if (err)
                        goto out;

                err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
                        &host->tx_l1_sync_clk);
        }
out:
        return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        u32 tx_lanes;
        int err = 0;

        err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
        if (err)
                goto out;

        err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
                        __func__);

out:
        return err;
}

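/**
 * ufs_qcom_check_hibern8 - poll the M-PHY TX FSM until it reaches HIBERN8
 * @hba: host controller instance
 *
 * Polls MPHY_TX_FSM_STATE on TX lane 0 every 100-200us for up to
 * HBRN8_POLL_TOUT_MS, with one final read after the deadline in case we
 * were scheduled out while polling. Returns 0 when the FSM is in HIBERN8,
 * a negative error code if the DME read fails, or the unexpected
 * (positive) FSM value otherwise.
 */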
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
        int err;
        u32 tx_fsm_val = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

        do {
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);
                if (err || tx_fsm_val == TX_FSM_HIBERN8)
                        break;

                /* sleep for max. 200us */
                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        /*
         * We might have scheduled out for a long time during polling, so
         * check the state again.
         */
        if (time_after(jiffies, timeout))
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);

        if (err) {
                dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
                                __func__, err);
        } else if (tx_fsm_val != TX_FSM_HIBERN8) {
                err = tx_fsm_val;
                dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
                                __func__, err);
        }

        return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
        ufshcd_rmwl(host->hba, QUNIPRO_SEL,
                   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
                   REG_UFS_CFG1);
        /* make sure above configuration is applied before we return */
        mb();
}

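/**
 * ufs_qcom_power_up_sequence - bring up the UFS PHY
 * @hba: host controller instance
 *
 * Sequence: assert the PHY reset, calibrate via phy_init(), de-assert the
 * reset, let clocks and power rails settle for ~1ms, start the serdes with
 * phy_power_on() and finally select the UniPro core.
 */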
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;
        bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;

        if (is_rate_B)
                phy_set_mode(phy, PHY_MODE_UFS_HS_B);

        /* Assert PHY reset and apply PHY calibration values */
        ufs_qcom_assert_reset(hba);
        /* provide 1ms delay to let the reset pulse propagate */
        usleep_range(1000, 1100);

        /* phy initialization - calibrate the phy */
        ret = phy_init(phy);
        if (ret) {
                dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
                        __func__, ret);
                goto out;
        }

        /* De-assert PHY reset and start serdes */
        ufs_qcom_deassert_reset(hba);

        /*
         * After reset de-assertion, the PHY needs all of its reference
         * clocks and power rails to settle before the serdes is started.
         */
        usleep_range(1000, 1100);

        /* power on phy - start serdes and phy's power and clocks */
        ret = phy_power_on(phy);
        if (ret) {
                dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
                        __func__, ret);
                goto out_disable_phy;
        }

        ufs_qcom_select_unipro_mode(host);

        return 0;

out_disable_phy:
        ufs_qcom_assert_reset(hba);
        phy_exit(phy);
out:
        return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs,
 * disabling the clock to inactivate UTP sub-modules not involved in a
 * specific operation. The UTP controller CGCs are disabled by default;
 * this function enables them (after every UFS link startup) to reduce
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
        ufshcd_writel(hba,
                ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
                REG_UFS_CFG2);

        /* Ensure that HW clock gating is enabled before next operations */
        mb();
}

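/**
 * ufs_qcom_hce_enable_notify - host controller enable callback
 * @hba: host controller instance
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * On PRE_CHANGE, powers up the PHY and then the lane clocks (which are
 * sourced from the PHY PLL). On POST_CHANGE, verifies that the PHY reached
 * HIBERN8 and enables hardware clock gating.
 */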
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
                                      enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_power_up_sequence(hba);
                /*
                 * The PHY PLL output is the source of tx/rx lane symbol
                 * clocks, hence, enable the lane clocks only after PHY
                 * is initialized.
                 */
                err = ufs_qcom_enable_lane_clks(host);
                break;
        case POST_CHANGE:
                /* check if UFS PHY moved from DISABLED to HIBERN8 */
                err = ufs_qcom_check_hibern8(hba);
                ufs_qcom_enable_hw_clk_gating(hba);

                break;
        default:
                dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
                err = -EINVAL;
                break;
        }
        return err;
}

/**
 * ufs_qcom_cfg_timers - program the timer and frequency related registers
 * @hba: host controller instance
 * @gear: the configured gear
 * @hs: FAST/FASTAUTO, SLOW/SLOWAUTO or UNCHANGED power mode
 * @rate: PA_HS_MODE_A or PA_HS_MODE_B high speed rate
 * @update_link_startup_timer: if true, also program the link startup timer
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                               u32 hs, u32 rate, bool update_link_startup_timer)
{
        int ret = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_clk_info *clki;
        u32 core_clk_period_in_ns;
        u32 tx_clk_cycles_per_us = 0;
        unsigned long core_clk_rate = 0;
        u32 core_clk_cycles_per_us = 0;

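        /*
         * Each entry below is {gear, TX symbol-clock cycles per us}; the
         * cycle counts end up in REG_UFS_TX_SYMBOL_CLK_NS_US and appear
         * to track the per-gear M-PHY symbol clock (e.g. 0x1F = 31 for
         * HS-G1 rate A, roughly doubling with each gear).
         */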
        static u32 pwm_fr_table[][2] = {
                {UFS_PWM_G1, 0x1},
                {UFS_PWM_G2, 0x1},
                {UFS_PWM_G3, 0x1},
                {UFS_PWM_G4, 0x1},
        };

        static u32 hs_fr_table_rA[][2] = {
                {UFS_HS_G1, 0x1F},
                {UFS_HS_G2, 0x3e},
                {UFS_HS_G3, 0x7D},
        };

        static u32 hs_fr_table_rB[][2] = {
                {UFS_HS_G1, 0x24},
                {UFS_HS_G2, 0x49},
                {UFS_HS_G3, 0x92},
        };

        /*
         * The QUniPro controller does not use the following registers:
         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
         * UFS_REG_PA_LINK_STARTUP_TIMER.
         * But the UTP controller uses the SYS1CLK_1US_REG register for
         * Interrupt Aggregation logic.
         */
        if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
                goto out;

        if (gear == 0) {
                dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
                goto out_error;
        }

        list_for_each_entry(clki, &hba->clk_list_head, list) {
                if (!strcmp(clki->name, "core_clk"))
                        core_clk_rate = clk_get_rate(clki->clk);
        }

        /* If frequency is smaller than 1MHz, set to 1MHz */
        if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
                core_clk_rate = DEFAULT_CLK_RATE_HZ;

        core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
        if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
                ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (ufs_qcom_cap_qunipro(host))
                goto out;

        core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
        core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
        core_clk_period_in_ns &= MASK_CLK_NS_REG;

        switch (hs) {
        case FASTAUTO_MODE:
        case FAST_MODE:
                if (rate == PA_HS_MODE_A) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rA));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
                } else if (rate == PA_HS_MODE_B) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rB));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
                } else {
                        dev_err(hba->dev, "%s: invalid rate = %d\n",
                                __func__, rate);
                        goto out_error;
                }
                break;
        case SLOWAUTO_MODE:
        case SLOW_MODE:
                if (gear > ARRAY_SIZE(pwm_fr_table)) {
                        dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(pwm_fr_table));
                        goto out_error;
                }
                tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
                break;
        case UNCHANGED:
        default:
                dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
                goto out_error;
        }

        if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
            (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
                /* the two fields of this register must be written together */
                ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
                              REG_UFS_TX_SYMBOL_CLK_NS_US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (update_link_startup_timer) {
                ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
                              REG_UFS_PA_LINK_STARTUP_TIMER);
                /*
                 * make sure that this configuration is applied before
                 * we return
                 */
                mb();
        }
        goto out;

out_error:
        ret = -EINVAL;
out:
        return ret;
}

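/**
 * ufs_qcom_link_startup_notify - link startup callback
 * @hba: host controller instance
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Before link startup, programs conservative PWM-G1 timers, sets the
 * UniPro core clock attribute on QUniPro hosts and disables the local
 * TX LCC. After link startup, propagates the connected TX lane count
 * to the PHY.
 */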
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                                        enum ufs_notify_change_status status)
{
        int err = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        switch (status) {
        case PRE_CHANGE:
                if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
                                        0, true)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        err = -EINVAL;
                        goto out;
                }

                if (ufs_qcom_cap_qunipro(host))
                        /*
                         * set unipro core clock cycles to 150 & clear clock
                         * divider
                         */
                        err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
                                                                          150);

                /*
                 * Some UFS devices (and possibly the host) have issues if
                 * LCC is enabled. So we are setting PA_Local_TX_LCC_Enable
                 * to 0 before link startup, which will make sure that both
                 * host and device TX LCC are disabled once link startup is
                 * completed.
                 */
                if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
                        err = ufshcd_dme_set(hba,
                                        UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
                                        0);

                break;
        case POST_CHANGE:
                ufs_qcom_link_startup_post_change(hba);
                break;
        default:
                break;
        }

out:
        return err;
}

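/*
 * Suspend/resume helpers: with the link fully off, the lane clocks and
 * PHY are powered down and the PHY reset is asserted; with the link
 * merely inactive, only the lane clocks and PHY power are dropped.
 * Resume powers the PHY and lane clocks back on.
 */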
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;

        if (ufs_qcom_is_link_off(hba)) {
                /*
                 * Disable the tx/rx lane symbol clocks before PHY is
                 * powered down as the PLL source should be disabled
                 * after downstream clocks are disabled.
                 */
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);

                /* Assert PHY soft reset */
                ufs_qcom_assert_reset(hba);
                goto out;
        }

        /*
         * If UniPro link is not active, PHY ref_clk, main PHY analog power
         * rail and low noise analog power rail for PLL can be switched off.
         */
        if (!ufs_qcom_is_link_active(hba)) {
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);
        }

out:
        return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int err;

        err = phy_power_on(phy);
        if (err) {
                dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
                        __func__, err);
                goto out;
        }

        err = ufs_qcom_enable_lane_clks(host);
        if (err)
                goto out;

        hba->is_sys_suspended = false;

out:
        return err;
}

struct ufs_qcom_dev_params {
        u32 pwm_rx_gear;        /* pwm rx gear to work in */
        u32 pwm_tx_gear;        /* pwm tx gear to work in */
        u32 hs_rx_gear;         /* hs rx gear to work in */
        u32 hs_tx_gear;         /* hs tx gear to work in */
        u32 rx_lanes;           /* number of rx lanes */
        u32 tx_lanes;           /* number of tx lanes */
        u32 rx_pwr_pwm;         /* rx pwm working pwr */
        u32 tx_pwr_pwm;         /* tx pwm working pwr */
        u32 rx_pwr_hs;          /* rx hs working pwr */
        u32 tx_pwr_hs;          /* tx hs working pwr */
        u32 hs_rate;            /* rate A/B to work in HS */
        u32 desired_working_mode;
};

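/**
 * ufs_qcom_get_pwr_dev_param - negotiate the power mode settings
 * @qcom_param: vendor-imposed limits
 * @dev_max: device capabilities
 * @agreed_pwr: resulting agreed settings
 *
 * Intersects the vendor limits with the device capabilities and fills in
 * the agreed power mode, lanes, gear and HS rate. Whichever side is
 * limited to PWM dictates both the mode and the gear. Returns 0 on
 * success and -ENOTSUPP if HS is requested but the device only supports
 * PWM.
 */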
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
                                      struct ufs_pa_layer_attr *dev_max,
                                      struct ufs_pa_layer_attr *agreed_pwr)
{
        int min_qcom_gear;
        int min_dev_gear;
        bool is_dev_sup_hs = false;
        bool is_qcom_max_hs = false;

        if (dev_max->pwr_rx == FAST_MODE)
                is_dev_sup_hs = true;

        if (qcom_param->desired_working_mode == FAST) {
                is_qcom_max_hs = true;
                min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
                                      qcom_param->hs_tx_gear);
        } else {
                min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
                                      qcom_param->pwm_tx_gear);
        }

        /*
         * The device doesn't support HS but qcom_param->desired_working_mode
         * is HS, thus device and qcom_param don't agree.
         */
        if (!is_dev_sup_hs && is_qcom_max_hs) {
                pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
                        __func__);
                return -ENOTSUPP;
        } else if (is_dev_sup_hs && is_qcom_max_hs) {
                /*
                 * Since the device supports HS, it supports FAST_MODE.
                 * Since qcom_param->desired_working_mode is also HS, the
                 * final decision (FAST/FASTAUTO) is made according to
                 * qcom_param as it is the restricting factor.
                 */
                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
                                                qcom_param->rx_pwr_hs;
        } else {
                /*
                 * Here qcom_param->desired_working_mode is PWM.
                 * It doesn't matter whether the device supports HS or PWM;
                 * in both cases qcom_param->desired_working_mode will
                 * determine the mode.
                 */
                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
                                                qcom_param->rx_pwr_pwm;
        }

        /*
         * We would like TX to work in the minimum number of lanes
         * between device capability and vendor preferences;
         * the same decision will be made for RX.
         */
        agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
                                                qcom_param->tx_lanes);
        agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
                                                qcom_param->rx_lanes);

        /* device maximum gear is the minimum between device rx and tx gears */
        min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

        /*
         * If the device capabilities and the vendor pre-defined preferences
         * are both HS or both PWM, then set the minimum gear as the chosen
         * working gear.
         * If one is PWM and the other is HS, then the PWM side gets to
         * decide the gear, as it is the one that also decided above what
         * power mode the device will be configured to.
         */
        if ((is_dev_sup_hs && is_qcom_max_hs) ||
            (!is_dev_sup_hs && !is_qcom_max_hs))
                agreed_pwr->gear_rx = agreed_pwr->gear_tx =
                        min_t(u32, min_dev_gear, min_qcom_gear);
        else if (!is_dev_sup_hs)
                agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
        else
                agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

        agreed_pwr->hs_rate = qcom_param->hs_rate;
        return 0;
}

#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
                const char *speed_mode)
{
        struct device *dev = host->hba->dev;
        struct device_node *np = dev->of_node;
        int err;
        const char *key = "qcom,bus-vector-names";

        if (!speed_mode) {
                err = -EINVAL;
                goto out;
        }

        if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
                err = of_property_match_string(np, key, "MAX");
        else
                err = of_property_match_string(np, key, speed_mode);

out:
        if (err < 0)
                dev_err(dev, "%s: Invalid %s mode %d\n",
                                __func__, speed_mode, err);
        return err;
}

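/*
 * Compose the bus vector name for the given power mode, e.g. "MIN" when
 * the power mode is uninitialized, "HS_RB_G3_L2" for HS rate B gear 3
 * on two lanes, or "PWM_G1_L1" for PWM gear 1 on one lane.
 */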
static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
        int gear = max_t(u32, p->gear_rx, p->gear_tx);
        int lanes = max_t(u32, p->lane_rx, p->lane_tx);
        int pwr;

        /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
        if (!gear)
                gear = 1;

        if (!lanes)
                lanes = 1;

        if (!p->pwr_rx && !p->pwr_tx) {
                pwr = SLOWAUTO_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
        } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
                 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
                pwr = FAST_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
                         p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
        } else {
                pwr = SLOW_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
                         "PWM", gear, lanes);
        }
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
        int err = 0;

        if (vote != host->bus_vote.curr_vote) {
                err = msm_bus_scale_client_update_request(
                                host->bus_vote.client_handle, vote);
                if (err) {
                        dev_err(host->hba->dev,
                                "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
                                __func__, host->bus_vote.client_handle,
                                vote, err);
                        goto out;
                }

                host->bus_vote.curr_vote = vote;
        }
out:
        return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
        int vote;
        int err = 0;
        char mode[BUS_VECTOR_NAME_LEN];

        ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

        vote = ufs_qcom_get_bus_vote(host, mode);
        if (vote >= 0)
                err = ufs_qcom_set_bus_vote(host, vote);
        else
                err = vote;

        if (err)
                dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
        else
                host->bus_vote.saved_vote = vote;
        return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        return snprintf(buf, PAGE_SIZE, "%u\n",
                        host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        uint32_t value;

        if (!kstrtou32(buf, 0, &value)) {
                host->bus_vote.is_max_bw_needed = !!value;
                ufs_qcom_update_bus_bw_vote(host);
        }

        return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
        int err;
        struct msm_bus_scale_pdata *bus_pdata;
        struct device *dev = host->hba->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct device_node *np = dev->of_node;

        bus_pdata = msm_bus_cl_get_pdata(pdev);
        if (!bus_pdata) {
                dev_err(dev, "%s: failed to get bus vectors\n", __func__);
                err = -ENODATA;
                goto out;
        }

        err = of_property_count_strings(np, "qcom,bus-vector-names");
        if (err < 0 || err != bus_pdata->num_usecases) {
                dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
                                __func__, err);
                goto out;
        }

        host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
        if (!host->bus_vote.client_handle) {
                dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
                                __func__);
                err = -EFAULT;
                goto out;
        }

        /* cache the vote index for minimum and maximum bandwidth */
        host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
        host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

        host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
        host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
        sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
        host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
        host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
        err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
        return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
        return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
        return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
        return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */

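/*
 * Gate or ungate the reference clock supplied to the UFS device through
 * dev_ref_clk_ctrl_mmio; the 1us delays below keep ref_clk valid around
 * hibern8 enter and exit.
 */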
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
        if (host->dev_ref_clk_ctrl_mmio &&
            (enable ^ host->is_dev_ref_clk_enabled)) {
                u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

                if (enable)
                        temp |= host->dev_ref_clk_en_mask;
                else
                        temp &= ~host->dev_ref_clk_en_mask;

                /*
                 * If we are here to disable this clock, it might be
                 * immediately after entering hibern8, in which case we need
                 * to make sure that the device ref_clk is active for at
                 * least 1us after the hibern8 enter.
                 */
                if (!enable)
                        udelay(1);

                writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

                /* ensure that ref_clk is enabled/disabled before we return */
                wmb();

                /*
                 * If we call hibern8 exit after this, we need to make sure that
                 * device ref_clk is stable for at least 1us before the hibern8
                 * exit command.
                 */
                if (enable)
                        udelay(1);

                host->is_dev_ref_clk_enabled = enable;
        }
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                                enum ufs_notify_change_status status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        u32 val;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        struct ufs_qcom_dev_params ufs_qcom_cap;
        int ret = 0;
        int res = 0;

        if (!dev_req_params) {
                pr_err("%s: incoming dev_req_params is NULL\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
                ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
                ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
                ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
                ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
                ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
                ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
                ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
                ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
                ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
                ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
                ufs_qcom_cap.desired_working_mode =
                                        UFS_QCOM_LIMIT_DESIRED_MODE;

                if (host->hw_ver.major == 0x1) {
                        /*
                         * HS-G3 operations may not reliably work on legacy QCOM
                         * UFS host controller hardware even though capability
                         * exchange during link startup phase may end up
                         * negotiating maximum supported gear as G3.
                         * Hence downgrade the maximum supported gear to HS-G2.
                         */
                        if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
                        if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
                }

                ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
                                                 dev_max_params,
                                                 dev_req_params);
                if (ret) {
                        pr_err("%s: failed to determine capabilities\n",
                                        __func__);
                        goto out;
                }

                /* enable the device ref clock before changing to HS mode */
                if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
                        ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, true);
                break;
        case POST_CHANGE:
                if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
                                        dev_req_params->pwr_rx,
                                        dev_req_params->hs_rate, false)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        /*
                         * We return the error code at the end of the routine,
                         * but continue to configure UFS_PHY_TX_LANE_ENABLE
                         * and bus voting as usual.
                         */
                        ret = -EINVAL;
                }

                val = ~(MAX_U32 << dev_req_params->lane_tx);
                res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
                if (res) {
                        dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
                                __func__, res);
                        ret = res;
                }

                /* cache the power mode parameters to use internally */
                memcpy(&host->dev_req_params,
                                dev_req_params, sizeof(*dev_req_params));
                ufs_qcom_update_bus_bw_vote(host);

                /* disable the device ref clock if entered PWM mode */
                if (ufshcd_is_hs_mode(&hba->pwr_info) &&
                        !ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, false);
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
        int err;
        u32 pa_vs_config_reg1;

        err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                             &pa_vs_config_reg1);
        if (err)
                goto out;

        /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
        err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                            (pa_vs_config_reg1 | (1 << 12)));

out:
        return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
        int err = 0;

        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
                err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

        return err;
}

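/*
 * For controllers advertising UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION, report
 * the UFSHCI version explicitly: 1.1 for legacy major version 0x1 hardware
 * and 2.0 otherwise.
 */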
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x1)
                return UFSHCI_VERSION_11;
        else
                return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller has some non-standard behaviours (quirks)
 * compared to what the UFSHCI specification mandates. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them
 * into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x01) {
                hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                            | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
                            | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

                if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
                        hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

                hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
        }

        if (host->hw_ver.major >= 0x2) {
                hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

                if (!ufs_qcom_cap_qunipro(host))
                        /* Legacy UniPro mode still needs the following quirks */
                        hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                                | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
                                | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
        }
}

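/*
 * Advertise controller capabilities to the core driver: clock gating
 * (including hibern8 with clock gating), clock scaling and auto-BKOPS
 * suspend. Version 2.x hardware additionally supports QUniPro mode and
 * retaining the security configuration across power collapse.
 */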
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
        hba->caps |= UFSHCD_CAP_CLK_SCALING;
        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

        if (host->hw_ver.major >= 0x2) {
                host->caps = UFS_QCOM_CAP_QUNIPRO |
                             UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
        }
}

/**
 * ufs_qcom_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
                                 enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        int vote = 0;

        /*
         * In case ufs_qcom_init() is not yet done, simply ignore.
         * ufs_qcom_setup_clocks() will be called again from
         * ufs_qcom_init() once initialization is done.
         */
        if (!host)
                return 0;

        if (on && (status == POST_CHANGE)) {
                phy_power_on(host->generic_phy);

                /* enable the device ref clock for HS mode */
                if (ufshcd_is_hs_mode(&hba->pwr_info))
                        ufs_qcom_dev_ref_clk_ctrl(host, true);
                vote = host->bus_vote.saved_vote;
                if (vote == host->bus_vote.min_bw_vote)
                        ufs_qcom_update_bus_bw_vote(host);

        } else if (!on && (status == PRE_CHANGE)) {
                if (!ufs_qcom_is_link_active(hba)) {
                        /* disable device ref_clk */
                        ufs_qcom_dev_ref_clk_ctrl(host, false);

                        /* powering off PHY during aggressive clk gating */
                        phy_power_off(host->generic_phy);
                }

                vote = host->bus_vote.min_bw_vote;
        }

        err = ufs_qcom_set_bus_vote(host, vote);
        if (err)
                dev_err(hba->dev, "%s: set bus vote failed %d\n",
                                __func__, err);

        return err;
}

#define ANDROID_BOOT_DEV_MAX    30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
        strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
        return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
        int err;
        struct device *dev = hba->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct ufs_qcom_host *host;
        struct resource *res;

        if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
                return -ENODEV;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
                goto out;
        }

        /* Make a two-way bind between the qcom host and the hba */
        host->hba = hba;
        ufshcd_set_variant(hba, host);

        /*
         * Voting/devoting the device ref_clk source is time consuming,
         * hence skip devoting it during aggressive clock gating. This
         * clock will still be gated off during runtime suspend.
         */
        host->generic_phy = devm_phy_get(dev, "ufsphy");

        if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * UFS driver might be probed before the phy driver does.
                 * In that case we would like to return EPROBE_DEFER code.
                 */
                err = -EPROBE_DEFER;
                dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
                        __func__, err);
                goto out_variant_clear;
        } else if (IS_ERR(host->generic_phy)) {
                err = PTR_ERR(host->generic_phy);
                dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
                goto out_variant_clear;
        }

        err = ufs_qcom_bus_register(host);
        if (err)
                goto out_variant_clear;

        ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
                &host->hw_ver.minor, &host->hw_ver.step);

        /*
         * For newer controllers, the device reference clock control bit has
         * moved inside the UFS controller register address space itself.
         */
        if (host->hw_ver.major >= 0x02) {
                host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
                host->dev_ref_clk_en_mask = BIT(26);
        } else {
                /* "dev_ref_clk_ctrl_mem" is optional resource */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (res) {
                        host->dev_ref_clk_ctrl_mmio =
                                        devm_ioremap_resource(dev, res);
                        if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
                                dev_warn(dev,
                                        "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
                                        __func__,
                                        PTR_ERR(host->dev_ref_clk_ctrl_mmio));
                                host->dev_ref_clk_ctrl_mmio = NULL;
                        }
                        host->dev_ref_clk_en_mask = BIT(5);
                }
        }

        /* update phy revision information before calling phy_init() */
        ufs_qcom_phy_save_controller_version(host->generic_phy,
                host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

        err = ufs_qcom_init_lane_clks(host);
        if (err)
                goto out_variant_clear;

        ufs_qcom_set_caps(hba);
        ufs_qcom_advertise_quirks(hba);

        ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

        if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
                ufs_qcom_hosts[hba->dev->id] = host;

        host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
        ufs_qcom_get_default_testbus_cfg(host);
        err = ufs_qcom_testbus_config(host);
        if (err) {
                dev_warn(dev, "%s: failed to configure the testbus %d\n",
                                __func__, err);
                err = 0;
        }

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        ufs_qcom_disable_lane_clks(host);
        phy_power_off(host->generic_phy);
        phy_exit(host->generic_phy);
}

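/*
 * Program the MAX_CORE_CLK_1US_CYCLES field of the vendor-specific
 * DME_VS_CORE_CLK_CTRL attribute (UniPro core clock cycles per us,
 * presumably matching the core clock rate in MHz) and clear the
 * CORE_CLK_DIV_EN bit. Callers use 150 when scaling up and 75 when
 * scaling down.
 */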
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles)
{
        int err;
        u32 core_clk_ctrl_reg;

        if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
                return -EINVAL;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);
        if (err)
                goto out;

        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
        core_clk_ctrl_reg |= clk_cycles;

        /* Clear CORE_CLK_DIV_EN */
        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

        err = ufshcd_dme_set(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            core_clk_ctrl_reg);
out:
        return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
        /* nothing to do as of now */
        return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 150 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        u32 core_clk_ctrl_reg;

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);

        /* make sure CORE_CLK_DIV_EN is cleared */
        if (!err &&
            (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
                core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
                err = ufshcd_dme_set(hba,
                                    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                                    core_clk_ctrl_reg);
        }

        return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 75 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

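/**
 * ufs_qcom_clk_scale_notify - clock scaling callback
 * @hba: host controller instance
 * @scale_up: true when scaling the clocks up
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Dispatches to the pre/post scale-up/down helpers above, then, after a
 * successful change, reprograms the timer registers for the cached power
 * mode and refreshes the bus bandwidth vote.
 */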
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                bool scale_up, enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
        int err = 0;

        if (status == PRE_CHANGE) {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_pre_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_pre_change(hba);
        } else {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_post_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_post_change(hba);

                if (err || !dev_req_params)
                        goto out;

                ufs_qcom_cfg_timers(hba,
                                    dev_req_params->gear_rx,
                                    dev_req_params->pwr_rx,
                                    dev_req_params->hs_rate,
                                    false);
                ufs_qcom_update_bus_bw_vote(host);
        }

out:
        return err;
}

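/*
 * Dump the vendor-specific debug registers through @print_fn. The UTP RAM
 * regions are only readable while UTP_DBG_RAMS_EN is set in UFS_CFG1, so
 * the bit is set around those reads and cleared afterwards.
 */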
1432static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1433                void *priv, void (*print_fn)(struct ufs_hba *hba,
1434                int offset, int num_regs, char *str, void *priv))
1435{
1436        u32 reg;
1437        struct ufs_qcom_host *host;
1438
1439        if (unlikely(!hba)) {
1440                pr_err("%s: hba is NULL\n", __func__);
1441                return;
1442        }
1443        if (unlikely(!print_fn)) {
1444                dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
1445                return;
1446        }
1447
1448        host = ufshcd_get_variant(hba);
1449        if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
1450                return;
1451
1452        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1453        print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
1454
1455        reg = ufshcd_readl(hba, REG_UFS_CFG1);
1456        reg |= UTP_DBG_RAMS_EN;
1457        ufshcd_writel(hba, reg, REG_UFS_CFG1);
1458
1459        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1460        print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
1461
1462        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1463        print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
1464
1465        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1466        print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
1467
1468        /* clear bit 17 - UTP_DBG_RAMS_EN */
1469        ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1470
1471        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1472        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
1473
1474        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1475        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
1476
1477        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1478        print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
1479
1480        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1481        print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
1482
1483        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1484        print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
1485
1486        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1487        print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
1488
1489        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1490        print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
1491}
1492
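/*
 * Set or clear both test bus enable bits in REG_UFS_CFG1, depending on
 * whether test bus prints are enabled in the host's dbg_print_en mask.
 */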
1493static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1494{
1495        if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
1496                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1497                                UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1498                ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1499        } else {
1500                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
1501                ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
1502        }
1503}
1504
1505static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1506{
1507        /* provide a legal default configuration */
1508        host->testbus.select_major = TSTBUS_UNIPRO;
1509        host->testbus.select_minor = 37;
1510}
1511
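/*
 * Only the major select needs validating: it indexes the switch in
 * ufs_qcom_testbus_config(), which has no default case and relies on
 * this check to reject out-of-range values.
 */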
1512static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1513{
1514        if (host->testbus.select_major >= TSTBUS_MAX) {
1515                dev_err(host->hba->dev,
1516                        "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
1517                        __func__, host->testbus.select_major);
1518                return false;
1519        }
1520
1521        return true;
1522}
1523
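/*
 * Route one of the internal monitor points to the UFS_TEST_BUS
 * register. The major select picks the functional unit and goes into
 * the TEST_BUS_SEL field of REG_UFS_CFG1; the minor select picks a
 * signal group within that unit and goes into the matching field of
 * one of the UFS_TEST_BUS_CTRL_{0,1,2} registers (UFS_UNIPRO_CFG for
 * the UniPro unit), as chosen by the switch below.
 *
 * A minimal sketch of a hypothetical caller sampling the DFC unit
 * (mirroring what ufs_qcom_print_unipro_testbus() does for UniPro):
 *
 *	host->testbus.select_major = TSTBUS_DFC;
 *	host->testbus.select_minor = 1;
 *	if (!ufs_qcom_testbus_config(host))
 *		val = ufshcd_readl(host->hba, UFS_TEST_BUS);
 */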
1524int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1525{
1526        int reg;
1527        int offset;
1528        u32 mask = TEST_BUS_SUB_SEL_MASK;
1529
1530        if (!host)
1531                return -EINVAL;
1532
1533        if (!ufs_qcom_testbus_cfg_is_ok(host))
1534                return -EPERM;
1535
1536        switch (host->testbus.select_major) {
1537        case TSTBUS_UAWM:
1538                reg = UFS_TEST_BUS_CTRL_0;
1539                offset = 24;
1540                break;
1541        case TSTBUS_UARM:
1542                reg = UFS_TEST_BUS_CTRL_0;
1543                offset = 16;
1544                break;
1545        case TSTBUS_TXUC:
1546                reg = UFS_TEST_BUS_CTRL_0;
1547                offset = 8;
1548                break;
1549        case TSTBUS_RXUC:
1550                reg = UFS_TEST_BUS_CTRL_0;
1551                offset = 0;
1552                break;
1553        case TSTBUS_DFC:
1554                reg = UFS_TEST_BUS_CTRL_1;
1555                offset = 24;
1556                break;
1557        case TSTBUS_TRLUT:
1558                reg = UFS_TEST_BUS_CTRL_1;
1559                offset = 16;
1560                break;
1561        case TSTBUS_TMRLUT:
1562                reg = UFS_TEST_BUS_CTRL_1;
1563                offset = 8;
1564                break;
1565        case TSTBUS_OCSC:
1566                reg = UFS_TEST_BUS_CTRL_1;
1567                offset = 0;
1568                break;
1569        case TSTBUS_WRAPPER:
1570                reg = UFS_TEST_BUS_CTRL_2;
1571                offset = 16;
1572                break;
1573        case TSTBUS_COMBINED:
1574                reg = UFS_TEST_BUS_CTRL_2;
1575                offset = 8;
1576                break;
1577        case TSTBUS_UTP_HCI:
1578                reg = UFS_TEST_BUS_CTRL_2;
1579                offset = 0;
1580                break;
1581        case TSTBUS_UNIPRO:
1582                reg = UFS_UNIPRO_CFG;
1583                offset = 20;
1584                mask = 0xFFF;
1585                break;
1586        /*
1587         * No need for a default case, since
1588         * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1589         * is legal
1590         */
1591        }
1592        mask <<= offset;
1593
1594        pm_runtime_get_sync(host->hba->dev);
1595        ufshcd_hold(host->hba, false);
1596        ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1597                    (u32)host->testbus.select_major << 19,
1598                    REG_UFS_CFG1);
1599        ufshcd_rmwl(host->hba, mask,
1600                    (u32)host->testbus.select_minor << offset,
1601                    reg);
1602        ufs_qcom_enable_test_bus(host);
1603        /*
1604         * Make sure the test bus configuration is
1605         * committed before returning.
1606         */
1607        mb();
1608        ufshcd_release(host->hba);
1609        pm_runtime_put_sync(host->hba->dev);
1610
1611        return 0;
1612}
1613
1614static void ufs_qcom_testbus_read(struct ufs_hba *hba)
1615{
1616        ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
1617}
1618
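/*
 * Sweep all 256 minor selects of the UniPro test bus, sampling
 * UFS_TEST_BUS after each reconfiguration, then hex-dump the collected
 * values in one go. Note that this overwrites host->testbus with the
 * UniPro selection.
 */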
1619static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
1620{
1621        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1622        u32 *testbus = NULL;
1623        int i, nminor = 256, testbus_len = nminor * sizeof(u32);
1624
1625        testbus = kmalloc(testbus_len, GFP_KERNEL);
1626        if (!testbus)
1627                return;
1628
1629        host->testbus.select_major = TSTBUS_UNIPRO;
1630        for (i = 0; i < nminor; i++) {
1631                host->testbus.select_minor = i;
1632                ufs_qcom_testbus_config(host);
1633                testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
1634        }
1635        print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
1636                        16, 4, testbus, testbus_len, false);
1637        kfree(testbus);
1638}
1639
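/*
 * Top-level debug dump, wired up below as the dbg_register_dump
 * variant op: dumps the vendor-specific HCI registers, the hardware
 * debug windows and the test bus contents.
 */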
1640static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1641{
1642        ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
1643                        "HCI Vendor Specific Registers ");
1644
1645        /* sleep briefly between dumps as we are dumping a lot of data */
1646        ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
1647        usleep_range(1000, 1100);
1648        ufs_qcom_testbus_read(hba);
1649        usleep_range(1000, 1100);
1650        ufs_qcom_print_unipro_testbus(hba);
1651        usleep_range(1000, 1100);
1652}
1653
1654/**
1655 * ufs_hba_qcom_vops - UFS QCOM specific variant operations
1656 *
1657 * The variant operations configure the necessary controller and PHY
1658 * handshake during initialization.
1659 */
1660static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1661        .name                   = "qcom",
1662        .init                   = ufs_qcom_init,
1663        .exit                   = ufs_qcom_exit,
1664        .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
1665        .clk_scale_notify       = ufs_qcom_clk_scale_notify,
1666        .setup_clocks           = ufs_qcom_setup_clocks,
1667        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
1668        .link_startup_notify    = ufs_qcom_link_startup_notify,
1669        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
1670        .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
1671        .suspend                = ufs_qcom_suspend,
1672        .resume                 = ufs_qcom_resume,
1673        .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
1674};
1675
1676/**
1677 * ufs_qcom_probe - probe routine of the driver
1678 * @pdev: pointer to platform device handle
1679 *
1680 * Return: zero for success and non-zero for failure
1681 */
1682static int ufs_qcom_probe(struct platform_device *pdev)
1683{
1684        int err;
1685        struct device *dev = &pdev->dev;
1686
1687        /* Perform generic probe */
1688        err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1689        if (err)
1690                dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
1691
1692        return err;
1693}
1694
1695/**
1696 * ufs_qcom_remove - remove the UFS host controller instance
1697 * @pdev: pointer to platform device handle
1698 *
1699 * Always returns 0
1700 */
1701static int ufs_qcom_remove(struct platform_device *pdev)
1702{
1703        struct ufs_hba *hba = platform_get_drvdata(pdev);
1704
1705        pm_runtime_get_sync(&pdev->dev);
1706        ufshcd_remove(hba);
1707        return 0;
1708}
1709
1710static const struct of_device_id ufs_qcom_of_match[] = {
1711        { .compatible = "qcom,ufshc"},
1712        {},
1713};
1714MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1715
1716static const struct dev_pm_ops ufs_qcom_pm_ops = {
1717        .suspend        = ufshcd_pltfrm_suspend,
1718        .resume         = ufshcd_pltfrm_resume,
1719        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
1720        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
1721        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
1722};
1723
1724static struct platform_driver ufs_qcom_pltfrm = {
1725        .probe  = ufs_qcom_probe,
1726        .remove = ufs_qcom_remove,
1727        .shutdown = ufshcd_pltfrm_shutdown,
1728        .driver = {
1729                .name   = "ufshcd-qcom",
1730                .pm     = &ufs_qcom_pm_ops,
1731                .of_match_table = of_match_ptr(ufs_qcom_of_match),
1732        },
1733};
1734module_platform_driver(ufs_qcom_pltfrm);
1735
1736MODULE_LICENSE("GPL v2");
1737