linux/drivers/scsi/ufs/ufs-qcom.c
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN   \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

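/*
 * Test bus "major" selectors: each value picks the internal sub-module
 * whose signals are routed onto the UFS test bus (see
 * ufs_qcom_testbus_config() and ufs_qcom_enable_test_bus()).
 */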
enum {
        TSTBUS_UAWM,
        TSTBUS_UARM,
        TSTBUS_TXUC,
        TSTBUS_RXUC,
        TSTBUS_DFC,
        TSTBUS_TRLUT,
        TSTBUS_TMRLUT,
        TSTBUS_OCSC,
        TSTBUS_UTP_HCI,
        TSTBUS_COMBINED,
        TSTBUS_WRAPPER,
        TSTBUS_UNIPRO,
        TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
                char *prefix)
{
        print_hex_dump(KERN_ERR, prefix,
                        len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
                        16, 4, (void __force *)hba->mmio_base + offset,
                        len * 4, false);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
                char *prefix, void *priv)
{
        ufs_qcom_dump_regs(hba, offset, len, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
        int err = 0;

        err = ufshcd_dme_get(hba,
                        UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
                                __func__, err);

        return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
                const char *name, struct clk **clk_out)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk)) {
                err = PTR_ERR(clk);
                dev_err(dev, "%s: failed to get %s err %d\n",
                                __func__, name, err);
        } else {
                *clk_out = clk;
        }

        return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
                const char *name, struct clk *clk)
{
        int err = 0;

        err = clk_prepare_enable(clk);
        if (err)
                dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

        return err;
}

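/*
 * Disable the Tx/Rx lane symbol clocks. The lane-1 clocks only exist when
 * more than one lane per direction is configured, hence the conditionals.
 */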
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
        if (!host->is_lane_clks_enabled)
                return;

        if (host->hba->lanes_per_direction > 1)
                clk_disable_unprepare(host->tx_l1_sync_clk);
        clk_disable_unprepare(host->tx_l0_sync_clk);
        if (host->hba->lanes_per_direction > 1)
                clk_disable_unprepare(host->rx_l1_sync_clk);
        clk_disable_unprepare(host->rx_l0_sync_clk);

        host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        if (host->is_lane_clks_enabled)
                return 0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
                host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
                host->tx_l0_sync_clk);
        if (err)
                goto disable_rx_l0;

        if (host->hba->lanes_per_direction > 1) {
                err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
                        host->rx_l1_sync_clk);
                if (err)
                        goto disable_tx_l0;

                err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
                        host->tx_l1_sync_clk);
                if (err)
                        goto disable_rx_l1;
        }

        host->is_lane_clks_enabled = true;
        goto out;

disable_rx_l1:
        if (host->hba->lanes_per_direction > 1)
                clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
        clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
        clk_disable_unprepare(host->rx_l0_sync_clk);
out:
        return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        err = ufs_qcom_host_clk_get(dev,
                        "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_get(dev,
                        "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
        if (err)
                goto out;

        /* In case of single lane per direction, don't read lane1 clocks */
        if (host->hba->lanes_per_direction > 1) {
                err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
                        &host->rx_l1_sync_clk);
                if (err)
                        goto out;

                err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
                        &host->tx_l1_sync_clk);
        }
out:
        return err;
}

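/*
 * After a successful link startup, read how many Tx data lanes actually
 * got connected and let the PHY enable exactly those lanes.
 */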
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        u32 tx_lanes;
        int err = 0;

        err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
        if (err)
                goto out;

        err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
                        __func__);

out:
        return err;
}

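/*
 * Poll the M-PHY Tx FSM until it reaches HIBERN8 or HBRN8_POLL_TOUT_MS
 * expires. Returns 0 when HIBERN8 was reached, a negative errno on a DME
 * read failure, or the unexpected FSM state value otherwise.
 */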
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
        int err;
        u32 tx_fsm_val = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

        do {
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);
                if (err || tx_fsm_val == TX_FSM_HIBERN8)
                        break;

                /* sleep for max. 200us */
                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        /*
         * we might have scheduled out for long during polling so
         * check the state again.
         */
        if (time_after(jiffies, timeout))
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);

        if (err) {
                dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
                                __func__, err);
        } else if (tx_fsm_val != TX_FSM_HIBERN8) {
                err = tx_fsm_val;
                dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
                                __func__, err);
        }

        return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
        ufshcd_rmwl(host->hba, QUNIPRO_SEL,
                   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
                   REG_UFS_CFG1);
        /* make sure above configuration is applied before we return */
        mb();
}

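/*
 * PHY power up sequence: assert PHY reset, calibrate the PHY for the
 * limit HS rate series, de-assert reset, start the SerDes, wait for the
 * PCS to become ready, then select the UniPro core in REG_UFS_CFG1.
 */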
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;
        bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B);

        /* Assert PHY reset and apply PHY calibration values */
        ufs_qcom_assert_reset(hba);
        /* provide 1ms delay to let the reset pulse propagate */
        usleep_range(1000, 1100);

        ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
        if (ret) {
                dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
                        __func__, ret);
                goto out;
        }

        /* De-assert PHY reset and start serdes */
        ufs_qcom_deassert_reset(hba);

        /*
         * after reset deassertion, phy will need all ref clocks,
         * voltage, current to settle down before starting serdes.
         */
        usleep_range(1000, 1100);
        ret = ufs_qcom_phy_start_serdes(phy);
        if (ret) {
                dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
                        __func__, ret);
                goto out;
        }

        ret = ufs_qcom_phy_is_pcs_ready(phy);
        if (ret)
                dev_err(hba->dev,
                        "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
                        __func__, ret);

        ufs_qcom_select_unipro_mode(host);

out:
        return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default, so
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
        ufshcd_writel(hba,
                ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
                REG_UFS_CFG2);

        /* Ensure that HW clock gating is enabled before next operations */
        mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
                                      enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_power_up_sequence(hba);
                /*
                 * The PHY PLL output is the source of tx/rx lane symbol
                 * clocks, hence, enable the lane clocks only after PHY
                 * is initialized.
                 */
                err = ufs_qcom_enable_lane_clks(host);
                break;
        case POST_CHANGE:
                /* check if UFS PHY moved from DISABLED to HIBERN8 */
                err = ufs_qcom_check_hibern8(hba);
                ufs_qcom_enable_hw_clk_gating(hba);
                break;
        default:
                dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
                err = -EINVAL;
                break;
        }
        return err;
}

/*
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                               u32 hs, u32 rate, bool update_link_startup_timer)
{
        int ret = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_clk_info *clki;
        u32 core_clk_period_in_ns;
        u32 tx_clk_cycles_per_us = 0;
        unsigned long core_clk_rate = 0;
        u32 core_clk_cycles_per_us = 0;

        static u32 pwm_fr_table[][2] = {
                {UFS_PWM_G1, 0x1},
                {UFS_PWM_G2, 0x1},
                {UFS_PWM_G3, 0x1},
                {UFS_PWM_G4, 0x1},
        };

        static u32 hs_fr_table_rA[][2] = {
                {UFS_HS_G1, 0x1F},
                {UFS_HS_G2, 0x3e},
                {UFS_HS_G3, 0x7D},
        };

        static u32 hs_fr_table_rB[][2] = {
                {UFS_HS_G1, 0x24},
                {UFS_HS_G2, 0x49},
                {UFS_HS_G3, 0x92},
        };

        /*
         * The QUniPro controller does not use the following registers:
         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
         * UFS_REG_PA_LINK_STARTUP_TIMER.
         * But the UTP controller uses the SYS1CLK_1US_REG register for
         * Interrupt Aggregation logic.
         */
        if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
                goto out;

        if (gear == 0) {
                dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
                goto out_error;
        }

        list_for_each_entry(clki, &hba->clk_list_head, list) {
                if (!strcmp(clki->name, "core_clk"))
                        core_clk_rate = clk_get_rate(clki->clk);
        }

        /* If frequency is smaller than 1MHz, set to 1MHz */
        if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
                core_clk_rate = DEFAULT_CLK_RATE_HZ;

        core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
        if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
                ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (ufs_qcom_cap_qunipro(host))
                goto out;

        core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
        core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
        core_clk_period_in_ns &= MASK_CLK_NS_REG;

        switch (hs) {
        case FASTAUTO_MODE:
        case FAST_MODE:
                if (rate == PA_HS_MODE_A) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rA));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
                } else if (rate == PA_HS_MODE_B) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rB));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
                } else {
                        dev_err(hba->dev, "%s: invalid rate = %d\n",
                                __func__, rate);
                        goto out_error;
                }
                break;
        case SLOWAUTO_MODE:
        case SLOW_MODE:
                if (gear > ARRAY_SIZE(pwm_fr_table)) {
                        dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(pwm_fr_table));
                        goto out_error;
                }
                tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
                break;
        case UNCHANGED:
        default:
                dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
                goto out_error;
        }

        if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
            (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
                /* both fields of this register must be written at once */
                ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
                              REG_UFS_TX_SYMBOL_CLK_NS_US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (update_link_startup_timer) {
                ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
                              REG_UFS_PA_LINK_STARTUP_TIMER);
                /*
                 * make sure that this configuration is applied before
                 * we return
                 */
                mb();
        }
        goto out;

out_error:
        ret = -EINVAL;
out:
        return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                                        enum ufs_notify_change_status status)
{
        int err = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        switch (status) {
        case PRE_CHANGE:
                if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
                                        0, true)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        err = -EINVAL;
                        goto out;
                }

                if (ufs_qcom_cap_qunipro(host))
                        /*
                         * set unipro core clock cycles to 150 & clear clock
                         * divider
                         */
                        err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
                                                                          150);

                /*
                 * Some UFS devices (and possibly the host) have issues if
                 * LCC is enabled. So we are setting PA_Local_TX_LCC_Enable
                 * to 0 before link startup, which will make sure that both
                 * host and device TX LCC are disabled once link startup is
                 * completed.
                 */
                if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
                        err = ufshcd_dme_set(hba,
                                        UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
                                        0);

                break;
        case POST_CHANGE:
                ufs_qcom_link_startup_post_change(hba);
                break;
        default:
                break;
        }

out:
        return err;
}

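/*
 * Suspend: if the link is already off, disable the lane clocks, power off
 * the PHY and assert its reset; if the link is merely inactive (hibern8),
 * only the lane clocks and PHY power are switched off.
 */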
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;

        if (ufs_qcom_is_link_off(hba)) {
                /*
                 * Disable the tx/rx lane symbol clocks before PHY is
                 * powered down as the PLL source should be disabled
                 * after downstream clocks are disabled.
                 */
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);

                /* Assert PHY soft reset */
                ufs_qcom_assert_reset(hba);
                goto out;
        }

        /*
         * If UniPro link is not active, PHY ref_clk, main PHY analog power
         * rail and low noise analog power rail for PLL can be switched off.
         */
        if (!ufs_qcom_is_link_active(hba)) {
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);
        }

out:
        return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int err;

        err = phy_power_on(phy);
        if (err) {
                dev_err(hba->dev, "%s: failed to power on PHY, err = %d\n",
                        __func__, err);
                goto out;
        }

        err = ufs_qcom_enable_lane_clks(host);
        if (err)
                goto out;

        hba->is_sys_suspended = false;

out:
        return err;
}

struct ufs_qcom_dev_params {
        u32 pwm_rx_gear;        /* pwm rx gear to work in */
        u32 pwm_tx_gear;        /* pwm tx gear to work in */
        u32 hs_rx_gear;         /* hs rx gear to work in */
        u32 hs_tx_gear;         /* hs tx gear to work in */
        u32 rx_lanes;           /* number of rx lanes */
        u32 tx_lanes;           /* number of tx lanes */
        u32 rx_pwr_pwm;         /* rx pwm working pwr */
        u32 tx_pwr_pwm;         /* tx pwm working pwr */
        u32 rx_pwr_hs;          /* rx hs working pwr */
        u32 tx_pwr_hs;          /* tx hs working pwr */
        u32 hs_rate;            /* rate A/B to work in HS */
        u32 desired_working_mode;
};

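/*
 * Negotiate the power mode by taking the more restrictive of the device
 * capabilities (dev_max) and the vendor limits (qcom_param) for mode
 * (HS/PWM), gear, lane count and HS rate; the result is written to
 * agreed_pwr.
 */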
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
                                      struct ufs_pa_layer_attr *dev_max,
                                      struct ufs_pa_layer_attr *agreed_pwr)
{
        int min_qcom_gear;
        int min_dev_gear;
        bool is_dev_sup_hs = false;
        bool is_qcom_max_hs = false;

        if (dev_max->pwr_rx == FAST_MODE)
                is_dev_sup_hs = true;

        if (qcom_param->desired_working_mode == FAST) {
                is_qcom_max_hs = true;
                min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
                                      qcom_param->hs_tx_gear);
        } else {
                min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
                                      qcom_param->pwm_tx_gear);
        }

        /*
         * device doesn't support HS but qcom_param->desired_working_mode is
         * HS, thus device and qcom_param don't agree
         */
        if (!is_dev_sup_hs && is_qcom_max_hs) {
                pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
                        __func__);
                return -ENOTSUPP;
        } else if (is_dev_sup_hs && is_qcom_max_hs) {
                /*
                 * since device supports HS, it supports FAST_MODE.
                 * since qcom_param->desired_working_mode is also HS
                 * then final decision (FAST/FASTAUTO) is done according
                 * to qcom_params as it is the restricting factor
                 */
                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
                                                qcom_param->rx_pwr_hs;
        } else {
                /*
                 * here qcom_param->desired_working_mode is PWM.
                 * it doesn't matter whether device supports HS or PWM,
                 * in both cases qcom_param->desired_working_mode will
                 * determine the mode
                 */
                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
                                                qcom_param->rx_pwr_pwm;
        }

        /*
         * we would like tx to work in the minimum number of lanes
         * between device capability and vendor preferences.
         * the same decision will be made for rx
         */
        agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
                                                qcom_param->tx_lanes);
        agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
                                                qcom_param->rx_lanes);

        /* device maximum gear is the minimum between device rx and tx gears */
        min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

        /*
         * if both device capabilities and vendor pre-defined preferences are
         * HS or both are PWM then set the minimum gear to be the chosen
         * working gear.
         * if one is PWM and the other is HS then the PWM side gets to decide
         * the gear, as it is the one that also decided previously what
         * pwr the device will be configured to.
         */
        if ((is_dev_sup_hs && is_qcom_max_hs) ||
            (!is_dev_sup_hs && !is_qcom_max_hs))
                agreed_pwr->gear_rx = agreed_pwr->gear_tx =
                        min_t(u32, min_dev_gear, min_qcom_gear);
        else if (!is_dev_sup_hs)
                agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
        else
                agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

        agreed_pwr->hs_rate = qcom_param->hs_rate;
        return 0;
}

#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
                const char *speed_mode)
{
        struct device *dev = host->hba->dev;
        struct device_node *np = dev->of_node;
        int err;
        const char *key = "qcom,bus-vector-names";

        if (!speed_mode) {
                err = -EINVAL;
                goto out;
        }

        if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
                err = of_property_match_string(np, key, "MAX");
        else
                err = of_property_match_string(np, key, speed_mode);

out:
        if (err < 0)
                dev_err(dev, "%s: Invalid %s mode %d\n",
                                __func__, speed_mode, err);
        return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
        int gear = max_t(u32, p->gear_rx, p->gear_tx);
        int lanes = max_t(u32, p->lane_rx, p->lane_tx);
        int pwr;

        /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
        if (!gear)
                gear = 1;

        if (!lanes)
                lanes = 1;

        if (!p->pwr_rx && !p->pwr_tx) {
                pwr = SLOWAUTO_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
        } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
                 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
                pwr = FAST_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
                         p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
        } else {
                pwr = SLOW_MODE;
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
                         "PWM", gear, lanes);
        }
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
        int err = 0;

        if (vote != host->bus_vote.curr_vote) {
                err = msm_bus_scale_client_update_request(
                                host->bus_vote.client_handle, vote);
                if (err) {
                        dev_err(host->hba->dev,
                                "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
                                __func__, host->bus_vote.client_handle,
                                vote, err);
                        goto out;
                }

                host->bus_vote.curr_vote = vote;
        }
out:
        return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
        int vote;
        int err = 0;
        char mode[BUS_VECTOR_NAME_LEN];

        ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

        vote = ufs_qcom_get_bus_vote(host, mode);
        if (vote >= 0)
                err = ufs_qcom_set_bus_vote(host, vote);
        else
                err = vote;

        if (err)
                dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
        else
                host->bus_vote.saved_vote = vote;
        return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        return snprintf(buf, PAGE_SIZE, "%u\n",
                        host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        uint32_t value;

        if (!kstrtou32(buf, 0, &value)) {
                host->bus_vote.is_max_bw_needed = !!value;
                ufs_qcom_update_bus_bw_vote(host);
        }

        return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
        int err;
        struct msm_bus_scale_pdata *bus_pdata;
        struct device *dev = host->hba->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct device_node *np = dev->of_node;

        bus_pdata = msm_bus_cl_get_pdata(pdev);
        if (!bus_pdata) {
                dev_err(dev, "%s: failed to get bus vectors\n", __func__);
                err = -ENODATA;
                goto out;
        }

        err = of_property_count_strings(np, "qcom,bus-vector-names");
        if (err < 0 || err != bus_pdata->num_usecases) {
                dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
                                __func__, err);
                goto out;
        }

        host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
        if (!host->bus_vote.client_handle) {
                dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
                                __func__);
                err = -EFAULT;
                goto out;
        }

        /* cache the vote index for minimum and maximum bandwidth */
        host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
        host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

        host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
        host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
        sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
        host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
        host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
        err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
        return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
        return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
        return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
        return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */

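/*
 * Toggle the device reference clock enable bit, honoring the device
 * timing requirements spelled out below: ref_clk must stay active for at
 * least 1us after a hibern8 enter and be stable for at least 1us before
 * a hibern8 exit.
 */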
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
        if (host->dev_ref_clk_ctrl_mmio &&
            (enable ^ host->is_dev_ref_clk_enabled)) {
                u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

                if (enable)
                        temp |= host->dev_ref_clk_en_mask;
                else
                        temp &= ~host->dev_ref_clk_en_mask;

                /*
                 * If we are here to disable this clock it might be immediately
                 * after entering into hibern8 in which case we need to make
                 * sure that device ref_clk is active at least 1us after the
                 * hibern8 enter.
                 */
                if (!enable)
                        udelay(1);

                writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

                /* ensure that ref_clk is enabled/disabled before we return */
                wmb();

                /*
                 * If we call hibern8 exit after this, we need to make sure that
                 * device ref_clk is stable for at least 1us before the hibern8
                 * exit command.
                 */
                if (enable)
                        udelay(1);

                host->is_dev_ref_clk_enabled = enable;
        }
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                                enum ufs_notify_change_status status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        u32 val;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        struct ufs_qcom_dev_params ufs_qcom_cap;
        int ret = 0;
        int res = 0;

        if (!dev_req_params) {
                pr_err("%s: incoming dev_req_params is NULL\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
                ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
                ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
                ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
                ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
                ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
                ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
                ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
                ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
                ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
                ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
                ufs_qcom_cap.desired_working_mode =
                                        UFS_QCOM_LIMIT_DESIRED_MODE;

                if (host->hw_ver.major == 0x1) {
                        /*
                         * HS-G3 operations may not reliably work on legacy QCOM
                         * UFS host controller hardware even though capability
                         * exchange during link startup phase may end up
                         * negotiating maximum supported gear as G3.
                         * Hence downgrade the maximum supported gear to HS-G2.
                         */
                        if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
                        if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
                }

                ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
                                                 dev_max_params,
                                                 dev_req_params);
                if (ret) {
                        pr_err("%s: failed to determine capabilities\n",
                                        __func__);
                        goto out;
                }

                /* enable the device ref clock before changing to HS mode */
                if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
                        ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, true);
                break;
        case POST_CHANGE:
                if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
                                        dev_req_params->pwr_rx,
                                        dev_req_params->hs_rate, false)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        /*
                         * we return error code at the end of the routine,
                         * but continue to configure UFS_PHY_TX_LANE_ENABLE
                         * and bus voting as usual
                         */
                        ret = -EINVAL;
                }

                val = ~(MAX_U32 << dev_req_params->lane_tx);
                res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
                if (res) {
                        dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
                                __func__, res);
                        ret = res;
                }

                /* cache the power mode parameters to use internally */
                memcpy(&host->dev_req_params,
                                dev_req_params, sizeof(*dev_req_params));
                ufs_qcom_update_bus_bw_vote(host);

                /* disable the device ref clock if entered PWM mode */
                if (ufshcd_is_hs_mode(&hba->pwr_info) &&
                        !ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, false);
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
        int err;
        u32 pa_vs_config_reg1;

        err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                             &pa_vs_config_reg1);
        if (err)
                goto out;

        /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
        err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                            (pa_vs_config_reg1 | (1 << 12)));

out:
        return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
        int err = 0;

        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
                err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

        return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x1)
                return UFSHCI_VERSION_11;
        else
                return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) beyond what the UFSHCI specification defines. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them
 * into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x01) {
                hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                            | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
                            | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

                if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
                        hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

                hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
        }

        if (host->hw_ver.major >= 0x2) {
                hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

                if (!ufs_qcom_cap_qunipro(host))
                        /* Legacy UniPro mode still needs the following quirks */
                        hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                                | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
                                | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
        }
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
        hba->caps |= UFSHCD_CAP_CLK_SCALING;
        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

        if (host->hw_ver.major >= 0x2) {
                host->caps = UFS_QCOM_CAP_QUNIPRO |
                             UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
        }
}

/**
 * ufs_qcom_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: if true, enable clocks, else disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
                                 enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        int vote = 0;

        /*
         * In case ufs_qcom_init() is not yet done, simply ignore.
         * ufs_qcom_setup_clocks() is called again from ufs_qcom_init()
         * once initialization is done.
         */
        if (!host)
                return 0;

        if (on && (status == POST_CHANGE)) {
                phy_power_on(host->generic_phy);

                /* enable the device ref clock for HS mode */
                if (ufshcd_is_hs_mode(&hba->pwr_info))
                        ufs_qcom_dev_ref_clk_ctrl(host, true);
                vote = host->bus_vote.saved_vote;
                if (vote == host->bus_vote.min_bw_vote)
                        ufs_qcom_update_bus_bw_vote(host);

        } else if (!on && (status == PRE_CHANGE)) {
                if (!ufs_qcom_is_link_active(hba)) {
                        /* disable device ref_clk */
                        ufs_qcom_dev_ref_clk_ctrl(host, false);

                        /* powering off PHY during aggressive clk gating */
                        phy_power_off(host->generic_phy);
                }

                vote = host->bus_vote.min_bw_vote;
        }

        err = ufs_qcom_set_bus_vote(host, vote);
        if (err)
                dev_err(hba->dev, "%s: set bus vote failed %d\n",
                                __func__, err);

        return err;
}

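/*
 * "androidboot.bootdevice=" on the kernel command line names the boot
 * storage device. ufs_qcom_init() bails out with -ENODEV for any UFS
 * controller that does not match it, so only the boot controller probes.
 */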
#define ANDROID_BOOT_DEV_MAX    30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
        strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
        return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
        int err;
        struct device *dev = hba->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct ufs_qcom_host *host;
        struct resource *res;

        if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
                return -ENODEV;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
                goto out;
        }

        /* Make a two way bind between the qcom host and the hba */
        host->hba = hba;
        ufshcd_set_variant(hba, host);

        /*
         * voting/devoting device ref_clk source is time consuming hence
         * skip devoting it during aggressive clock gating. This clock
         * will still be gated off during runtime suspend.
         */
        host->generic_phy = devm_phy_get(dev, "ufsphy");

        if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * UFS driver might be probed before the phy driver is.
                 * In that case we would like to return EPROBE_DEFER code.
                 */
                err = -EPROBE_DEFER;
                dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
                        __func__, err);
                goto out_variant_clear;
        } else if (IS_ERR(host->generic_phy)) {
                err = PTR_ERR(host->generic_phy);
                dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
                goto out_variant_clear;
        }

        err = ufs_qcom_bus_register(host);
        if (err)
                goto out_variant_clear;

        ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
                &host->hw_ver.minor, &host->hw_ver.step);

        /*
         * for newer controllers, device reference clock control bit has
         * moved inside UFS controller register address space itself.
         */
        if (host->hw_ver.major >= 0x02) {
                host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
                host->dev_ref_clk_en_mask = BIT(26);
        } else {
                /* "dev_ref_clk_ctrl_mem" is optional resource */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (res) {
                        host->dev_ref_clk_ctrl_mmio =
                                        devm_ioremap_resource(dev, res);
                        if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
                                dev_warn(dev,
                                        "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
                                        __func__,
                                        PTR_ERR(host->dev_ref_clk_ctrl_mmio));
                                host->dev_ref_clk_ctrl_mmio = NULL;
                        }
                        host->dev_ref_clk_en_mask = BIT(5);
                }
        }

        /* update phy revision information before calling phy_init() */
        ufs_qcom_phy_save_controller_version(host->generic_phy,
                host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

        phy_init(host->generic_phy);
        err = phy_power_on(host->generic_phy);
        if (err)
                goto out_unregister_bus;

        err = ufs_qcom_init_lane_clks(host);
        if (err)
                goto out_disable_phy;

        ufs_qcom_set_caps(hba);
        ufs_qcom_advertise_quirks(hba);

        ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

        if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
                ufs_qcom_hosts[hba->dev->id] = host;

        host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
        ufs_qcom_get_default_testbus_cfg(host);
        err = ufs_qcom_testbus_config(host);
        if (err) {
                dev_warn(dev, "%s: failed to configure the testbus %d\n",
                                __func__, err);
                err = 0;
        }

        goto out;

out_disable_phy:
        phy_power_off(host->generic_phy);
out_unregister_bus:
        phy_exit(host->generic_phy);
out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        ufs_qcom_disable_lane_clks(host);
        phy_power_off(host->generic_phy);
        phy_exit(host->generic_phy);
}

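/*
 * Program the MAX_CORE_CLK_1US_CYCLES field of the vendor-specific
 * DME_VS_CORE_CLK_CTRL attribute to clk_cycles and clear its
 * CORE_CLK_DIV_EN bit, using a DME read-modify-write cycle.
 */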
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles)
{
        int err;
        u32 core_clk_ctrl_reg;

        if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
                return -EINVAL;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);
        if (err)
                goto out;

        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
        core_clk_ctrl_reg |= clk_cycles;

        /* Clear CORE_CLK_DIV_EN */
        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

        err = ufshcd_dme_set(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            core_clk_ctrl_reg);
out:
        return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
        /* nothing to do as of now */
        return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 150 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        u32 core_clk_ctrl_reg;

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);

        /* make sure CORE_CLK_DIV_EN is cleared */
        if (!err &&
            (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
                core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
                err = ufshcd_dme_set(hba,
                                    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                                    core_clk_ctrl_reg);
        }

        return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 75 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                bool scale_up, enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
        int err = 0;

        if (status == PRE_CHANGE) {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_pre_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_pre_change(hba);
        } else {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_post_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_post_change(hba);

                if (err || !dev_req_params)
                        goto out;

                ufs_qcom_cfg_timers(hba,
                                    dev_req_params->gear_rx,
                                    dev_req_params->pwr_rx,
                                    dev_req_params->hs_rate,
                                    false);
                ufs_qcom_update_bus_bw_vote(host);
        }

out:
        return err;
}

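/*
 * Dump all hardware debug register ranges through the caller-supplied
 * print_fn. UTP_DBG_RAMS_EN (bit 17 of REG_UFS_CFG1) is set while the
 * EDTL/DESC/PRDT RAM ranges are dumped and cleared again afterwards.
 */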
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
                void *priv, void (*print_fn)(struct ufs_hba *hba,
                int offset, int num_regs, char *str, void *priv))
{
        u32 reg;
        struct ufs_qcom_host *host;

        if (unlikely(!hba)) {
                pr_err("%s: hba is NULL\n", __func__);
                return;
        }
        if (unlikely(!print_fn)) {
                dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
                return;
        }

        host = ufshcd_get_variant(hba);
        if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
                return;

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
        print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

        /* set bit 17 - UTP_DBG_RAMS_EN - to enable reading the debug RAMs */
        reg = ufshcd_readl(hba, REG_UFS_CFG1);
        reg |= UFS_BIT(17);
        ufshcd_writel(hba, reg, REG_UFS_CFG1);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
        print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
        print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
        print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

        /* clear bit 17 - UTP_DBG_RAMS_EN */
        ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
        print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
        print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
        print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
        print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
        print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}

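/**
 * ufs_qcom_enable_test_bus - gate the test bus output
 * @host: host controller private data
 *
 * Sets both test bus enable bits in REG_UFS_CFG1 when test bus prints are
 * enabled in host->dbg_print_en, and clears them otherwise.
 */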
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
        if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
                                UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
        } else {
                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
        }
}

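/**
 * ufs_qcom_get_default_testbus_cfg - reset the test bus selection
 * @host: host controller private data
 *
 * Picks the UniPro test bus, minor line 37, as a known-legal default.
 */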
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
        /* provide a legal default configuration */
        host->testbus.select_major = TSTBUS_UNIPRO;
        host->testbus.select_minor = 37;
}

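/**
 * ufs_qcom_testbus_cfg_is_ok - validate the requested test bus selection
 * @host: host controller private data
 *
 * Returns true if select_major addresses one of the TSTBUS_* blocks,
 * false otherwise. The minor selector is not range-checked here.
 */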
static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
        if (host->testbus.select_major >= TSTBUS_MAX) {
                dev_err(host->hba->dev,
                        "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
                        __func__, host->testbus.select_major);
                return false;
        }

        return true;
}

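/**
 * ufs_qcom_testbus_config - route the selected test bus to UFS_TEST_BUS
 * @host: host controller private data
 *
 * Validates host->testbus, programs the major selector in REG_UFS_CFG1 and
 * the minor selector in the matching TEST_BUS_CTRL register, then enables
 * the test bus output.
 *
 * Example (a hypothetical caller, mirroring ufs_qcom_print_unipro_testbus()):
 *
 *	host->testbus.select_major = TSTBUS_UNIPRO;
 *	host->testbus.select_minor = 37;
 *	if (!ufs_qcom_testbus_config(host))
 *		dev_info(host->hba->dev, "TEST_BUS = 0x%08x\n",
 *			 ufshcd_readl(host->hba, UFS_TEST_BUS));
 *
 * Returns 0 on success, -EINVAL if @host is NULL and -EPERM if the
 * configuration is out of range.
 */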
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
        int reg;
        int offset;
        u32 mask = TEST_BUS_SUB_SEL_MASK;

        if (!host)
                return -EINVAL;

        if (!ufs_qcom_testbus_cfg_is_ok(host))
                return -EPERM;

        switch (host->testbus.select_major) {
        case TSTBUS_UAWM:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 24;
                break;
        case TSTBUS_UARM:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 16;
                break;
        case TSTBUS_TXUC:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 8;
                break;
        case TSTBUS_RXUC:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 0;
                break;
        case TSTBUS_DFC:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 24;
                break;
        case TSTBUS_TRLUT:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 16;
                break;
        case TSTBUS_TMRLUT:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 8;
                break;
        case TSTBUS_OCSC:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 0;
                break;
        case TSTBUS_WRAPPER:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 16;
                break;
        case TSTBUS_COMBINED:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 8;
                break;
        case TSTBUS_UTP_HCI:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 0;
                break;
        case TSTBUS_UNIPRO:
                reg = UFS_UNIPRO_CFG;
                offset = 20;
                mask = 0xFFF;
                break;
        /*
         * No need for a default case, since
         * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
         * is legal
         */
        }
        mask <<= offset;

        pm_runtime_get_sync(host->hba->dev);
        ufshcd_hold(host->hba, false);
        ufshcd_rmwl(host->hba, TEST_BUS_SEL,
                    (u32)host->testbus.select_major << 19,
                    REG_UFS_CFG1);
        ufshcd_rmwl(host->hba, mask,
                    (u32)host->testbus.select_minor << offset,
                    reg);
        ufs_qcom_enable_test_bus(host);
        /*
         * Make sure the test bus configuration is
         * committed before returning.
         */
        mb();
        ufshcd_release(host->hba);
        pm_runtime_put_sync(host->hba->dev);

        return 0;
}

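/**
 * ufs_qcom_testbus_read - print the word currently driven on UFS_TEST_BUS
 * @hba: host controller instance
 */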
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
        ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}

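/**
 * ufs_qcom_print_unipro_testbus - dump all UniPro test bus lines
 * @hba: host controller instance
 *
 * Iterates over all 256 minor selections of the UniPro test bus, samples
 * UFS_TEST_BUS for each and prints the result as one hex dump. Returns
 * silently if the temporary buffer cannot be allocated.
 */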
static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        u32 *testbus = NULL;
        int i, nminor = 256, testbus_len = nminor * sizeof(u32);

        testbus = kmalloc(testbus_len, GFP_KERNEL);
        if (!testbus)
                return;

        host->testbus.select_major = TSTBUS_UNIPRO;
        for (i = 0; i < nminor; i++) {
                host->testbus.select_minor = i;
                ufs_qcom_testbus_config(host);
                testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
        }
        print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
                        16, 4, testbus, testbus_len, false);
        kfree(testbus);
}

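/**
 * ufs_qcom_dump_dbg_regs - top-level debug register dump
 * @hba: host controller instance
 *
 * Entry point for the core driver's dbg_register_dump vop: dumps the
 * vendor-specific HCI registers, the hardware debug registers and the
 * test bus contents.
 */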
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
        ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
                        "HCI Vendor Specific Registers ");

        /* sleep a bit between the dumps as we are printing a lot of data */
        ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
        usleep_range(1000, 1100);
        ufs_qcom_testbus_read(hba);
        usleep_range(1000, 1100);
        ufs_qcom_print_unipro_testbus(hba);
        usleep_range(1000, 1100);
}

/**
 * ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
        .name                   = "qcom",
        .init                   = ufs_qcom_init,
        .exit                   = ufs_qcom_exit,
        .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
        .clk_scale_notify       = ufs_qcom_clk_scale_notify,
        .setup_clocks           = ufs_qcom_setup_clocks,
        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
        .link_startup_notify    = ufs_qcom_link_startup_notify,
        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
        .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
        .suspend                = ufs_qcom_suspend,
        .resume                 = ufs_qcom_resume,
        .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;

        /* Perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
        if (err)
                dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

        return err;
}

/**
 * ufs_qcom_remove - tear down the UFS host controller instance
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
        { .compatible = "qcom,ufshc"},
        {},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

static const struct dev_pm_ops ufs_qcom_pm_ops = {
        .suspend        = ufshcd_pltfrm_suspend,
        .resume         = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltfrm = {
        .probe  = ufs_qcom_probe,
        .remove = ufs_qcom_remove,
        .shutdown = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-qcom",
                .pm     = &ufs_qcom_pm_ops,
                .of_match_table = of_match_ptr(ufs_qcom_of_match),
        },
};
module_platform_driver(ufs_qcom_pltfrm);

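/*
 * Suggested addition (not in the original source): a MODULE_DESCRIPTION()
 * tag conventionally accompanies MODULE_LICENSE(); the wording below is a
 * placeholder.
 */
MODULE_DESCRIPTION("Qualcomm UFS host controller driver");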
MODULE_LICENSE("GPL v2");