linux/drivers/scsi/ufs/ufs-qcom.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN   \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
        TSTBUS_UAWM,
        TSTBUS_UARM,
        TSTBUS_TXUC,
        TSTBUS_RXUC,
        TSTBUS_DFC,
        TSTBUS_TRLUT,
        TSTBUS_TMRLUT,
        TSTBUS_OCSC,
        TSTBUS_UTP_HCI,
        TSTBUS_COMBINED,
        TSTBUS_WRAPPER,
        TSTBUS_UNIPRO,
        TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles);

static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
        return container_of(rcd, struct ufs_qcom_host, rcdev);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
                                       const char *prefix, void *priv)
{
        ufshcd_dump_regs(hba, offset, len * 4, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
        int err = 0;

        err = ufshcd_dme_get(hba,
                        UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
        if (err)
                dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
                                __func__, err);

        return err;
}

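/**
 * ufs_qcom_host_clk_get - look up a host clock by name
 * @dev: device for the clock lookup
 * @name: clock consumer name
 * @clk_out: output pointer for the acquired clock
 * @optional: if true, a missing clock (-ENOENT) is not treated as an error
 *
 * Returns 0 on success (with *clk_out set, possibly to NULL for a missing
 * optional clock) and a negative error code otherwise.
 */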
static int ufs_qcom_host_clk_get(struct device *dev,
                const char *name, struct clk **clk_out, bool optional)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (!IS_ERR(clk)) {
                *clk_out = clk;
                return 0;
        }

        err = PTR_ERR(clk);

        if (optional && err == -ENOENT) {
                *clk_out = NULL;
                return 0;
        }

        if (err != -EPROBE_DEFER)
                dev_err(dev, "failed to get %s err %d\n", name, err);

        return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
                const char *name, struct clk *clk)
{
        int err = 0;

        err = clk_prepare_enable(clk);
        if (err)
                dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

        return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
        if (!host->is_lane_clks_enabled)
                return;

        clk_disable_unprepare(host->tx_l1_sync_clk);
        clk_disable_unprepare(host->tx_l0_sync_clk);
        clk_disable_unprepare(host->rx_l1_sync_clk);
        clk_disable_unprepare(host->rx_l0_sync_clk);

        host->is_lane_clks_enabled = false;
}

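/**
 * ufs_qcom_enable_lane_clks - enable the tx/rx lane symbol clocks
 * @host: qcom host controller instance
 *
 * Enables the lane clocks in order and unwinds the already-enabled ones
 * on failure. The lane1 clocks may be NULL on single-lane configurations,
 * in which case clk_prepare_enable()/clk_disable_unprepare() are no-ops.
 */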
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        if (host->is_lane_clks_enabled)
                return 0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
                host->rx_l0_sync_clk);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
                host->tx_l0_sync_clk);
        if (err)
                goto disable_rx_l0;

        err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
                        host->rx_l1_sync_clk);
        if (err)
                goto disable_tx_l0;

        err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
                        host->tx_l1_sync_clk);
        if (err)
                goto disable_rx_l1;

        host->is_lane_clks_enabled = true;
        goto out;

disable_rx_l1:
        clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
        clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
        clk_disable_unprepare(host->rx_l0_sync_clk);
out:
        return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
        int err = 0;
        struct device *dev = host->hba->dev;

        if (has_acpi_companion(dev))
                return 0;

        err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
                                        &host->rx_l0_sync_clk, false);
        if (err)
                goto out;

        err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
                                        &host->tx_l0_sync_clk, false);
        if (err)
                goto out;

        /* In case of single lane per direction, don't read lane1 clocks */
        if (host->hba->lanes_per_direction > 1) {
                err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
                        &host->rx_l1_sync_clk, false);
                if (err)
                        goto out;

                err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
                        &host->tx_l1_sync_clk, true);
        }
out:
        return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
        u32 tx_lanes;

        return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}

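/**
 * ufs_qcom_check_hibern8 - verify that the M-PHY TX FSM reached HIBERN8
 * @hba: host controller instance
 *
 * Polls MPHY_TX_FSM_STATE for up to HBRN8_POLL_TOUT_MS and returns 0 if
 * the FSM reached TX_FSM_HIBERN8, a negative error code if the DME access
 * failed, or the unexpected FSM state value otherwise.
 */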
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
        int err;
        u32 tx_fsm_val = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

        do {
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);
                if (err || tx_fsm_val == TX_FSM_HIBERN8)
                        break;

                /* sleep for max. 200us */
                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        /*
         * We might have scheduled out for a long time during polling, so
         * check the state again.
         */
        if (time_after(jiffies, timeout))
                err = ufshcd_dme_get(hba,
                                UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
                                &tx_fsm_val);

        if (err) {
                dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
                                __func__, err);
        } else if (tx_fsm_val != TX_FSM_HIBERN8) {
                err = tx_fsm_val;
                dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
                                __func__, err);
        }

        return err;
}

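/**
 * ufs_qcom_select_unipro_mode - select QUniPro or legacy UniPro core
 * @host: qcom host controller instance
 *
 * Sets the QUNIPRO_SEL bit in REG_UFS_CFG1 when the controller supports
 * QUniPro, clears it otherwise, and uses a memory barrier to make sure
 * the register write is issued before returning.
 */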
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
        ufshcd_rmwl(host->hba, QUNIPRO_SEL,
                   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
                   REG_UFS_CFG1);
        /* make sure above configuration is applied before we return */
        mb();
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;
        bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;

        if (is_rate_B)
                phy_set_mode(phy, PHY_MODE_UFS_HS_B);

        /* phy initialization - calibrate the phy */
        ret = phy_init(phy);
        if (ret) {
                dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
                        __func__, ret);
                goto out;
        }

        /* power on phy - start serdes and phy's power and clocks */
        ret = phy_power_on(phy);
        if (ret) {
                dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
                        __func__, ret);
                goto out_disable_phy;
        }

        ufs_qcom_select_unipro_mode(host);

        return 0;

out_disable_phy:
        phy_exit(phy);
out:
        return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default and
 * this function enables them (after every UFS link startup) to reduce some
 * leakage power.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
        ufshcd_writel(hba,
                ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
                REG_UFS_CFG2);

        /* Ensure that HW clock gating is enabled before next operations */
        mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
                                      enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                err = ufs_qcom_power_up_sequence(hba);
                if (err)
                        break;
                /*
                 * The PHY PLL output is the source of tx/rx lane symbol
                 * clocks, hence, enable the lane clocks only after PHY
                 * is initialized.
                 */
                err = ufs_qcom_enable_lane_clks(host);
                break;
        case POST_CHANGE:
                /* check if UFS PHY moved from DISABLED to HIBERN8 */
                err = ufs_qcom_check_hibern8(hba);
                ufs_qcom_enable_hw_clk_gating(hba);

                break;
        default:
                dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
                err = -EINVAL;
                break;
        }
        return err;
}

/**
 * ufs_qcom_cfg_timers - configure the controller timer registers
 * @hba: host controller instance
 * @gear: currently negotiated gear
 * @hs: high speed or low speed mode
 * @rate: rate A or rate B (only relevant in high speed mode)
 * @update_link_startup_timer: if true, also program the link startup timer
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                               u32 hs, u32 rate, bool update_link_startup_timer)
{
        int ret = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_clk_info *clki;
        u32 core_clk_period_in_ns;
        u32 tx_clk_cycles_per_us = 0;
        unsigned long core_clk_rate = 0;
        u32 core_clk_cycles_per_us = 0;

        static u32 pwm_fr_table[][2] = {
                {UFS_PWM_G1, 0x1},
                {UFS_PWM_G2, 0x1},
                {UFS_PWM_G3, 0x1},
                {UFS_PWM_G4, 0x1},
        };

        static u32 hs_fr_table_rA[][2] = {
                {UFS_HS_G1, 0x1F},
                {UFS_HS_G2, 0x3e},
                {UFS_HS_G3, 0x7D},
        };

        static u32 hs_fr_table_rB[][2] = {
                {UFS_HS_G1, 0x24},
                {UFS_HS_G2, 0x49},
                {UFS_HS_G3, 0x92},
        };

        /*
         * The Qunipro controller does not use the following registers:
         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
         * UFS_REG_PA_LINK_STARTUP_TIMER.
         * But the UTP controller uses the SYS1CLK_1US_REG register for
         * Interrupt Aggregation logic.
         */
        if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
                goto out;

        if (gear == 0) {
                dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
                goto out_error;
        }

        list_for_each_entry(clki, &hba->clk_list_head, list) {
                if (!strcmp(clki->name, "core_clk"))
                        core_clk_rate = clk_get_rate(clki->clk);
        }

        /* If frequency is smaller than 1MHz, set to 1MHz */
        if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
                core_clk_rate = DEFAULT_CLK_RATE_HZ;

        core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
        if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
                ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (ufs_qcom_cap_qunipro(host))
                goto out;

        core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
        core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
        core_clk_period_in_ns &= MASK_CLK_NS_REG;

        switch (hs) {
        case FASTAUTO_MODE:
        case FAST_MODE:
                if (rate == PA_HS_MODE_A) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rA));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
                } else if (rate == PA_HS_MODE_B) {
                        if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
                                dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(hs_fr_table_rB));
                                goto out_error;
                        }
                        tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
                } else {
                        dev_err(hba->dev, "%s: invalid rate = %d\n",
                                __func__, rate);
                        goto out_error;
                }
                break;
        case SLOWAUTO_MODE:
        case SLOW_MODE:
                if (gear > ARRAY_SIZE(pwm_fr_table)) {
                        dev_err(hba->dev,
                                        "%s: index %d exceeds table size %zu\n",
                                        __func__, gear,
                                        ARRAY_SIZE(pwm_fr_table));
                        goto out_error;
                }
                tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
                break;
        case UNCHANGED:
        default:
                dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
                goto out_error;
        }

        if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
            (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
                /* The two fields of this register shall be written at once */
                ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
                              REG_UFS_TX_SYMBOL_CLK_NS_US);
                /*
                 * make sure above write gets applied before we return from
                 * this function.
                 */
                mb();
        }

        if (update_link_startup_timer) {
                ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
                              REG_UFS_PA_LINK_STARTUP_TIMER);
                /*
                 * make sure that this configuration is applied before
                 * we return
                 */
                mb();
        }
        goto out;

out_error:
        ret = -EINVAL;
out:
        return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                                        enum ufs_notify_change_status status)
{
        int err = 0;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        switch (status) {
        case PRE_CHANGE:
                if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
                                        0, true)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        err = -EINVAL;
                        goto out;
                }

                if (ufs_qcom_cap_qunipro(host))
                        /*
                         * set unipro core clock cycles to 150 & clear clock
                         * divider
                         */
                        err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
                                                                          150);

                /*
                 * Some UFS devices (and maybe the host) have issues if LCC is
                 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
                 * before link startup which will make sure that both host
                 * and device TX LCC are disabled once link startup is
                 * completed.
                 */
                if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
                        err = ufshcd_dme_set(hba,
                                        UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
                                        0);

                break;
        case POST_CHANGE:
                ufs_qcom_link_startup_post_change(hba);
                break;
        default:
                break;
        }

out:
        return err;
}

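/**
 * ufs_qcom_suspend - qcom specific suspend handling
 * @hba: host controller instance
 * @pm_op: runtime or system PM operation
 *
 * If the link is off, the lane symbol clocks are disabled and then the
 * PHY is powered off; if the link is inactive (but not off), only the
 * lane clocks are disabled.
 */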
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;

        if (ufs_qcom_is_link_off(hba)) {
                /*
                 * Disable the tx/rx lane symbol clocks before PHY is
                 * powered down as the PLL source should be disabled
                 * after downstream clocks are disabled.
                 */
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);

        } else if (!ufs_qcom_is_link_active(hba)) {
                ufs_qcom_disable_lane_clks(host);
        }

        return ret;
}

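/**
 * ufs_qcom_resume - qcom specific resume handling
 * @hba: host controller instance
 * @pm_op: runtime or system PM operation
 *
 * Undoes ufs_qcom_suspend(): powers the PHY back on (if the link was off)
 * and re-enables the lane symbol clocks.
 */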
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int err;

        if (ufs_qcom_is_link_off(hba)) {
                err = phy_power_on(phy);
                if (err) {
                        dev_err(hba->dev, "%s: failed PHY power on: %d\n",
                                __func__, err);
                        return err;
                }

                err = ufs_qcom_enable_lane_clks(host);
                if (err)
                        return err;

        } else if (!ufs_qcom_is_link_active(hba)) {
                err = ufs_qcom_enable_lane_clks(host);
                if (err)
                        return err;
        }

        hba->is_sys_suspended = false;
        return 0;
}

#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
                const char *speed_mode)
{
        struct device *dev = host->hba->dev;
        struct device_node *np = dev->of_node;
        int err;
        const char *key = "qcom,bus-vector-names";

        if (!speed_mode) {
                err = -EINVAL;
                goto out;
        }

        if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
                err = of_property_match_string(np, key, "MAX");
        else
                err = of_property_match_string(np, key, speed_mode);

out:
        if (err < 0)
                dev_err(dev, "%s: Invalid %s mode %d\n",
                                __func__, speed_mode, err);
        return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
        int gear = max_t(u32, p->gear_rx, p->gear_tx);
        int lanes = max_t(u32, p->lane_rx, p->lane_tx);

        /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
        if (!gear)
                gear = 1;

        if (!lanes)
                lanes = 1;

        if (!p->pwr_rx && !p->pwr_tx) {
                snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
        } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
                 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
                         p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
        } else {
                snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
                         "PWM", gear, lanes);
        }
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
        int err = 0;

        if (vote != host->bus_vote.curr_vote) {
                err = msm_bus_scale_client_update_request(
                                host->bus_vote.client_handle, vote);
                if (err) {
                        dev_err(host->hba->dev,
                                "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
                                __func__, host->bus_vote.client_handle,
                                vote, err);
                        goto out;
                }

                host->bus_vote.curr_vote = vote;
        }
out:
        return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
        int vote;
        int err = 0;
        char mode[BUS_VECTOR_NAME_LEN];

        ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

        vote = ufs_qcom_get_bus_vote(host, mode);
        if (vote >= 0)
                err = ufs_qcom_set_bus_vote(host, vote);
        else
                err = vote;

        if (err)
                dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
        else
                host->bus_vote.saved_vote = vote;
        return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        return snprintf(buf, PAGE_SIZE, "%u\n",
                        host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        uint32_t value;

        if (!kstrtou32(buf, 0, &value)) {
                host->bus_vote.is_max_bw_needed = !!value;
                ufs_qcom_update_bus_bw_vote(host);
        }

        return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
        int err;
        struct msm_bus_scale_pdata *bus_pdata;
        struct device *dev = host->hba->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct device_node *np = dev->of_node;

        bus_pdata = msm_bus_cl_get_pdata(pdev);
        if (!bus_pdata) {
                dev_err(dev, "%s: failed to get bus vectors\n", __func__);
                err = -ENODATA;
                goto out;
        }

        err = of_property_count_strings(np, "qcom,bus-vector-names");
        if (err < 0 || err != bus_pdata->num_usecases) {
                dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
                                __func__, err);
                goto out;
        }

        host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
        if (!host->bus_vote.client_handle) {
                dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
                                __func__);
                err = -EFAULT;
                goto out;
        }

        /* cache the vote index for minimum and maximum bandwidth */
        host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
        host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

        host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
        host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
        sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
        host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
        host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
        err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
        return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
        return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
        return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
        return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */

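/**
 * ufs_qcom_dev_ref_clk_ctrl - gate or ungate the device reference clock
 * @host: qcom host controller instance
 * @enable: whether the device ref_clk should be enabled
 *
 * Flips the ref_clk enable bit in the dev_ref_clk_ctrl_mmio register (if
 * one was mapped) and enforces the 1us timing requirements around hibern8
 * enter/exit described in the comments below.
 */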
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
        if (host->dev_ref_clk_ctrl_mmio &&
            (enable ^ host->is_dev_ref_clk_enabled)) {
                u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

                if (enable)
                        temp |= host->dev_ref_clk_en_mask;
                else
                        temp &= ~host->dev_ref_clk_en_mask;

                /*
                 * If we are here to disable this clock it might be immediately
                 * after entering into hibern8 in which case we need to make
                 * sure that device ref_clk is active at least 1us after the
                 * hibern8 enter.
                 */
                if (!enable)
                        udelay(1);

                writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

                /* ensure that ref_clk is enabled/disabled before we return */
                wmb();

                /*
                 * If we call hibern8 exit after this, we need to make sure that
                 * device ref_clk is stable for at least 1us before the hibern8
                 * exit command.
                 */
                if (enable)
                        udelay(1);

                host->is_dev_ref_clk_enabled = enable;
        }
}

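/**
 * ufs_qcom_pwr_change_notify - power mode change notifier
 * @hba: host controller instance
 * @status: PRE_CHANGE or POST_CHANGE notify
 * @dev_max_params: maximum capabilities reported by the device
 * @dev_req_params: negotiated parameters to be requested from the device
 *
 * On PRE_CHANGE, clamps the negotiated power mode to the host controller
 * limits; on POST_CHANGE, reprograms the controller timers and updates the
 * bus bandwidth vote for the new power mode.
 */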
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                                enum ufs_notify_change_status status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_dev_params ufs_qcom_cap;
        int ret = 0;

        if (!dev_req_params) {
                pr_err("%s: incoming dev_req_params is NULL\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        switch (status) {
        case PRE_CHANGE:
                ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
                ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
                ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
                ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
                ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
                ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
                ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
                ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
                ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
                ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
                ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
                ufs_qcom_cap.desired_working_mode =
                                        UFS_QCOM_LIMIT_DESIRED_MODE;

                if (host->hw_ver.major == 0x1) {
                        /*
                         * HS-G3 operations may not reliably work on legacy QCOM
                         * UFS host controller hardware even though capability
                         * exchange during link startup phase may end up
                         * negotiating maximum supported gear as G3.
                         * Hence downgrade the maximum supported gear to HS-G2.
                         */
                        if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
                        if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
                                ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
                }

                ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
                                               dev_max_params,
                                               dev_req_params);
                if (ret) {
                        pr_err("%s: failed to determine capabilities\n",
                                        __func__);
                        goto out;
                }

                /* enable the device ref clock before changing to HS mode */
                if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
                        ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, true);
                break;
        case POST_CHANGE:
                if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
                                        dev_req_params->pwr_rx,
                                        dev_req_params->hs_rate, false)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        /*
                         * we return error code at the end of the routine,
                         * but continue to configure UFS_PHY_TX_LANE_ENABLE
                         * and bus voting as usual
                         */
                        ret = -EINVAL;
                }

                /* cache the power mode parameters to use internally */
                memcpy(&host->dev_req_params,
                                dev_req_params, sizeof(*dev_req_params));
                ufs_qcom_update_bus_bw_vote(host);

                /* disable the device ref clock if entered PWM mode */
                if (ufshcd_is_hs_mode(&hba->pwr_info) &&
                        !ufshcd_is_hs_mode(dev_req_params))
                        ufs_qcom_dev_ref_clk_ctrl(host, false);
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}

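/**
 * ufs_qcom_quirk_host_pa_saveconfigtime - apply the PA_SaveConfigTime quirk
 * @hba: host controller instance
 *
 * Sets bit 12 of the vendor specific PA_VS_CONFIG_REG1 attribute, which
 * allows extension of the MSB bits of the PA_SaveConfigTime attribute.
 */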
static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
        int err;
        u32 pa_vs_config_reg1;

        err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                             &pa_vs_config_reg1);
        if (err)
                goto out;

        /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
        err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
                            (pa_vs_config_reg1 | (1 << 12)));

out:
        return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
        int err = 0;

        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
                err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

        return err;
}

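/**
 * ufs_qcom_get_ufs_hci_version - get the UFSHCI version to be used
 * @hba: host controller instance
 *
 * Used when the UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION quirk is set: reports
 * UFSHCI 1.1 for major version 0x1 controllers and UFSHCI 2.0 otherwise,
 * instead of trusting the controller's version register.
 */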
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x1)
                return UFSHCI_VERSION_11;
        else
                return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) compared to what the UFSHCI specification requires. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (host->hw_ver.major == 0x01) {
                hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                            | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
                            | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

                if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
                        hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

                hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
        }

        if (host->hw_ver.major == 0x2) {
                hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

                if (!ufs_qcom_cap_qunipro(host))
                        /* Legacy UniPro mode still needs the following quirks */
                        hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
                                | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
                                | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
        }
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
        hba->caps |= UFSHCD_CAP_CLK_SCALING;
        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

        if (host->hw_ver.major >= 0x2) {
                host->caps = UFS_QCOM_CAP_QUNIPRO |
                             UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
        }
}

/**
 * ufs_qcom_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
                                 enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        int vote = 0;

        /*
         * In case ufs_qcom_init() is not yet done, simply ignore.
         * This ufs_qcom_setup_clocks() shall be called from
         * ufs_qcom_init() after init is done.
         */
        if (!host)
                return 0;

        if (on && (status == POST_CHANGE)) {
                /* enable the device ref clock for HS mode */
                if (ufshcd_is_hs_mode(&hba->pwr_info))
                        ufs_qcom_dev_ref_clk_ctrl(host, true);
                vote = host->bus_vote.saved_vote;
                if (vote == host->bus_vote.min_bw_vote)
                        ufs_qcom_update_bus_bw_vote(host);

        } else if (!on && (status == PRE_CHANGE)) {
                if (!ufs_qcom_is_link_active(hba)) {
                        /* disable device ref_clk */
                        ufs_qcom_dev_ref_clk_ctrl(host, false);
                }

                vote = host->bus_vote.min_bw_vote;
        }

        err = ufs_qcom_set_bus_vote(host, vote);
        if (err)
                dev_err(hba->dev, "%s: set bus vote failed %d\n",
                                __func__, err);

        return err;
}

static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
        struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

        /* Currently this code only knows about a single reset. */
        WARN_ON(id);
        ufs_qcom_assert_reset(host->hba);
        /* provide 1ms delay to let the reset pulse propagate. */
        usleep_range(1000, 1100);
        return 0;
}

static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
        struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

        /* Currently this code only knows about a single reset. */
        WARN_ON(id);
        ufs_qcom_deassert_reset(host->hba);

        /*
         * after reset deassertion, phy will need all ref clocks,
         * voltage, current to settle down before starting serdes.
         */
        usleep_range(1000, 1100);
        return 0;
}

static const struct reset_control_ops ufs_qcom_reset_ops = {
        .assert = ufs_qcom_reset_assert,
        .deassert = ufs_qcom_reset_deassert,
};

#define ANDROID_BOOT_DEV_MAX    30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
        strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
        return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
        int err;
        struct device *dev = hba->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct ufs_qcom_host *host;
        struct resource *res;

        if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
                return -ENODEV;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                goto out;
        }

        /* Make a two way bind between the qcom host and the hba */
        host->hba = hba;
        ufshcd_set_variant(hba, host);

        /* Fire up the reset controller. Failure here is non-fatal. */
        host->rcdev.of_node = dev->of_node;
        host->rcdev.ops = &ufs_qcom_reset_ops;
        host->rcdev.owner = dev->driver->owner;
        host->rcdev.nr_resets = 1;
        err = devm_reset_controller_register(dev, &host->rcdev);
        if (err) {
                dev_warn(dev, "Failed to register reset controller\n");
                err = 0;
        }

        /*
         * voting/devoting device ref_clk source is time consuming hence
         * skip devoting it during aggressive clock gating. This clock
         * will still be gated off during runtime suspend.
         */
        host->generic_phy = devm_phy_get(dev, "ufsphy");

        if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver might be probed before the phy driver.
                 * In that case we would like to return EPROBE_DEFER code.
                 */
                err = -EPROBE_DEFER;
                dev_warn(dev, "%s: required phy hasn't probed yet. err = %d\n",
                        __func__, err);
                goto out_variant_clear;
        } else if (IS_ERR(host->generic_phy)) {
                if (has_acpi_companion(dev)) {
                        host->generic_phy = NULL;
                } else {
                        err = PTR_ERR(host->generic_phy);
                        dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
                        goto out_variant_clear;
                }
        }

        host->device_reset = devm_gpiod_get_optional(dev, "reset",
                                                     GPIOD_OUT_HIGH);
        if (IS_ERR(host->device_reset)) {
                err = PTR_ERR(host->device_reset);
                if (err != -EPROBE_DEFER)
                        dev_err(dev, "failed to acquire reset gpio: %d\n", err);
                goto out_variant_clear;
        }

        err = ufs_qcom_bus_register(host);
        if (err)
                goto out_variant_clear;

        ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
                &host->hw_ver.minor, &host->hw_ver.step);

        /*
         * For newer controllers, the device reference clock control bit has
         * moved into the UFS controller's own register address space.
         */
        if (host->hw_ver.major >= 0x02) {
                host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
                host->dev_ref_clk_en_mask = BIT(26);
        } else {
                /* "dev_ref_clk_ctrl_mem" is optional resource */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (res) {
                        host->dev_ref_clk_ctrl_mmio =
                                        devm_ioremap_resource(dev, res);
                        if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
                                dev_warn(dev,
                                        "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
                                        __func__,
                                        PTR_ERR(host->dev_ref_clk_ctrl_mmio));
                                host->dev_ref_clk_ctrl_mmio = NULL;
                        }
                        host->dev_ref_clk_en_mask = BIT(5);
                }
        }

        err = ufs_qcom_init_lane_clks(host);
        if (err)
                goto out_variant_clear;

        ufs_qcom_set_caps(hba);
        ufs_qcom_advertise_quirks(hba);

        ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

        if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
                ufs_qcom_hosts[hba->dev->id] = host;

        host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
        ufs_qcom_get_default_testbus_cfg(host);
        err = ufs_qcom_testbus_config(host);
        if (err) {
                dev_warn(dev, "%s: failed to configure the testbus %d\n",
                                __func__, err);
                err = 0;
        }

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        ufs_qcom_disable_lane_clks(host);
        phy_power_off(host->generic_phy);
        phy_exit(host->generic_phy);
}

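/**
 * ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div - program the core clock cycles
 * @hba: host controller instance
 * @clk_cycles: number of unipro core clock cycles per 1us
 *
 * Read-modify-writes the vendor specific DME_VS_CORE_CLK_CTRL attribute:
 * updates the MAX_CORE_CLK_1US_CYCLES field and clears the CORE_CLK_DIV_EN
 * bit.
 */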
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
                                                       u32 clk_cycles)
{
        int err;
        u32 core_clk_ctrl_reg;

        if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
                return -EINVAL;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);
        if (err)
                goto out;

        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
        core_clk_ctrl_reg |= clk_cycles;

        /* Clear CORE_CLK_DIV_EN */
        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

        err = ufshcd_dme_set(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            core_clk_ctrl_reg);
out:
        return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
        /* nothing to do as of now */
        return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 150 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        u32 core_clk_ctrl_reg;

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);

        /* make sure CORE_CLK_DIV_EN is cleared */
        if (!err &&
            (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
                core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
                err = ufshcd_dme_set(hba,
                                    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                                    core_clk_ctrl_reg);
        }

        return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        if (!ufs_qcom_cap_qunipro(host))
                return 0;

        /* set unipro core clock cycles to 75 and clear clock divider */
        return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                bool scale_up, enum ufs_notify_change_status status)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
        int err = 0;

        if (status == PRE_CHANGE) {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_pre_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_pre_change(hba);
        } else {
                if (scale_up)
                        err = ufs_qcom_clk_scale_up_post_change(hba);
                else
                        err = ufs_qcom_clk_scale_down_post_change(hba);

                if (err || !dev_req_params)
                        goto out;

                ufs_qcom_cfg_timers(hba,
                                    dev_req_params->gear_rx,
                                    dev_req_params->pwr_rx,
                                    dev_req_params->hs_rate,
                                    false);
                ufs_qcom_update_bus_bw_vote(host);
        }

out:
        return err;
}

static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
                void *priv, void (*print_fn)(struct ufs_hba *hba,
                int offset, int num_regs, const char *str, void *priv))
{
        u32 reg;
        struct ufs_qcom_host *host;

        if (unlikely(!hba)) {
                pr_err("%s: hba is NULL\n", __func__);
                return;
        }
        if (unlikely(!print_fn)) {
                dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
                return;
        }

        host = ufshcd_get_variant(hba);
        if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
                return;

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
        print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

        reg = ufshcd_readl(hba, REG_UFS_CFG1);
        reg |= UTP_DBG_RAMS_EN;
        ufshcd_writel(hba, reg, REG_UFS_CFG1);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
        print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
        print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
        print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

        /* clear bit 17 - UTP_DBG_RAMS_EN */
        ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
        print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
        print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
        print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
        print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
        print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}

static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
        if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
                                UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
        } else {
                ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
        }
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
        /* provide a legal default configuration */
        host->testbus.select_major = TSTBUS_UNIPRO;
        host->testbus.select_minor = 37;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
        if (host->testbus.select_major >= TSTBUS_MAX) {
                dev_err(host->hba->dev,
                        "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
                        __func__, host->testbus.select_major);
                return false;
        }

        return true;
}

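/**
 * ufs_qcom_testbus_config - apply the current test bus configuration
 * @host: qcom host controller instance
 *
 * Maps host->testbus.select_major to the matching TEST_BUS_CTRL register
 * and bit offset, programs the major/minor selection and enables the test
 * bus, so that its output can be sampled from the UFS_TEST_BUS register.
 */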
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
        int reg;
        int offset;
        u32 mask = TEST_BUS_SUB_SEL_MASK;

        if (!host)
                return -EINVAL;

        if (!ufs_qcom_testbus_cfg_is_ok(host))
                return -EPERM;

        switch (host->testbus.select_major) {
        case TSTBUS_UAWM:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 24;
                break;
        case TSTBUS_UARM:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 16;
                break;
        case TSTBUS_TXUC:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 8;
                break;
        case TSTBUS_RXUC:
                reg = UFS_TEST_BUS_CTRL_0;
                offset = 0;
                break;
        case TSTBUS_DFC:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 24;
                break;
        case TSTBUS_TRLUT:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 16;
                break;
        case TSTBUS_TMRLUT:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 8;
                break;
        case TSTBUS_OCSC:
                reg = UFS_TEST_BUS_CTRL_1;
                offset = 0;
                break;
        case TSTBUS_WRAPPER:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 16;
                break;
        case TSTBUS_COMBINED:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 8;
                break;
        case TSTBUS_UTP_HCI:
                reg = UFS_TEST_BUS_CTRL_2;
                offset = 0;
                break;
        case TSTBUS_UNIPRO:
                reg = UFS_UNIPRO_CFG;
                offset = 20;
                mask = 0xFFF;
                break;
        /*
         * No need for a default case, since
         * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
         * is legal
         */
        }
        mask <<= offset;

        pm_runtime_get_sync(host->hba->dev);
        ufshcd_hold(host->hba, false);
        ufshcd_rmwl(host->hba, TEST_BUS_SEL,
                    (u32)host->testbus.select_major << 19,
                    REG_UFS_CFG1);
        ufshcd_rmwl(host->hba, mask,
                    (u32)host->testbus.select_minor << offset,
                    reg);
        ufs_qcom_enable_test_bus(host);
        /*
         * Make sure the test bus configuration is
         * committed before returning.
         */
        mb();
        ufshcd_release(host->hba);
        pm_runtime_put_sync(host->hba->dev);

        return 0;
}

static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
}

static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        u32 *testbus = NULL;
        int i, nminor = 256, testbus_len = nminor * sizeof(u32);

        testbus = kmalloc(testbus_len, GFP_KERNEL);
        if (!testbus)
                return;

        host->testbus.select_major = TSTBUS_UNIPRO;
        for (i = 0; i < nminor; i++) {
                host->testbus.select_minor = i;
                ufs_qcom_testbus_config(host);
                testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
        }
        print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
                        16, 4, testbus, testbus_len, false);
        kfree(testbus);
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
                         "HCI Vendor Specific Registers ");

        /* sleep a bit intermittently as we are dumping too much data */
        ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
        usleep_range(1000, 1100);
        ufs_qcom_testbus_read(hba);
        usleep_range(1000, 1100);
        ufs_qcom_print_unipro_testbus(hba);
        usleep_range(1000, 1100);
}

/**
 * ufs_qcom_device_reset() - toggle the (optional) device reset line
 * @hba: per-adapter instance
 *
 * Toggles the (optional) reset line to reset the attached device.
 */
static void ufs_qcom_device_reset(struct ufs_hba *hba)
{
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);

        /* reset gpio is optional */
        if (!host->device_reset)
                return;

        /*
         * The UFS device shall detect reset pulses of 1us, sleep for 10us to
         * be on the safe side.
         */
        gpiod_set_value_cansleep(host->device_reset, 1);
        usleep_range(10, 15);

        gpiod_set_value_cansleep(host->device_reset, 0);
        usleep_range(10, 15);
}

/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
        .name                   = "qcom",
        .init                   = ufs_qcom_init,
        .exit                   = ufs_qcom_exit,
        .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
        .clk_scale_notify       = ufs_qcom_clk_scale_notify,
        .setup_clocks           = ufs_qcom_setup_clocks,
        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
        .link_startup_notify    = ufs_qcom_link_startup_notify,
        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
        .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
        .suspend                = ufs_qcom_suspend,
        .resume                 = ufs_qcom_resume,
        .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
        .device_reset           = ufs_qcom_device_reset,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;

        /* Perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
        if (err)
                dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

        return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
        { .compatible = "qcom,ufshc"},
        {},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
        { "QCOM24A5" },
        { },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif

static const struct dev_pm_ops ufs_qcom_pm_ops = {
        .suspend        = ufshcd_pltfrm_suspend,
        .resume         = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
        .probe  = ufs_qcom_probe,
        .remove = ufs_qcom_remove,
        .shutdown = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-qcom",
                .pm     = &ufs_qcom_pm_ops,
                .of_match_table = of_match_ptr(ufs_qcom_of_match),
                .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
        },
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");