linux/drivers/scsi/ufs/ufshcd.c
/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_REQ_SENSE_SIZE   18

#define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
                                 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT  100 /* msecs */

/* maximum number of retries for a general UIC command  */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02

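/* Enable or disable the given regulator depending on _on; evaluates to the result */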
#define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
        ({                                                              \
                int _ret;                                               \
                if (_on)                                                \
                        _ret = ufshcd_enable_vreg(_dev, _vreg);         \
                else                                                    \
                        _ret = ufshcd_disable_vreg(_dev, _vreg);        \
                _ret;                                                   \
        })

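/* Dump @len bytes of @buf at KERN_ERR level, 16 bytes per line in 4-byte groups, no ASCII column */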
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)

enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
        UFSHCD_CMD_PER_LUN      = 32,
        UFSHCD_CAN_QUEUE        = 32,
};

/* UFSHCD states */
enum {
        UFSHCD_STATE_RESET,
        UFSHCD_STATE_ERROR,
        UFSHCD_STATE_OPERATIONAL,
        UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
        UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
        UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
        UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
        UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
        UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
        UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
        ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
        ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
        ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
        ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
        ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
        ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
        ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
        ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
        ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
                                        enum uic_link_state link_state)
{
        enum ufs_pm_level lvl;

        for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
                if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
                        (ufs_pm_lvl_states[lvl].link_state == link_state))
                        return lvl;
        }

        /* if no match is found, return level 0 */
        return UFS_PM_LVL_0;
}

static struct ufs_dev_fix ufs_fixups[] = {
        /* UFS cards deviations table */
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_NO_FASTAUTO),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

        END_FIX
};

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                                 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
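/* Return true if @tag is a valid transfer request slot index for this HBA */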
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
        return tag >= 0 && tag < hba->nutrs;
}

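/*
 * Register the host interrupt handler (shared IRQ) if it is not already
 * registered; returns 0 on success or the request_irq() error code.
 */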
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
        int ret = 0;

        if (!hba->is_irq_enabled) {
                ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
                                hba);
                if (ret)
                        dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
                                __func__, ret);
                hba->is_irq_enabled = true;
        }

        return ret;
}

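/* Free the host IRQ if it was previously registered */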
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
        if (hba->is_irq_enabled) {
                free_irq(hba->irq, hba);
                hba->is_irq_enabled = false;
        }
}

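/*
 * ufshcd_scsi_unblock_requests() / ufshcd_scsi_block_requests() keep a
 * reference count so that nested callers only block/unblock the SCSI
 * midlayer queue on the outermost call.
 */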
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
        if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
                scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
        if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
                scsi_block_requests(hba->host);
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
        if (!val)
                return;

        if (*val < 0x20 || *val > 0x7e)
                *val = ' ';
}

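/* Trace helpers: emit the request UPIU header plus the CDB, query or TM payload */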
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                const char *str)
{
        struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

        trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                const char *str)
{
        struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

        trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                const char *str)
{
        struct utp_task_req_desc *descp;
        struct utp_upiu_task_req *task_req;
        int off = (int)tag - hba->nutrs;

        descp = &hba->utmrdl_base_addr[off];
        task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
        trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
                        &task_req->input_param1);
}

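/*
 * Trace a command event: LBA and transfer length are decoded only for
 * READ(10)/WRITE(10); other opcodes are traced with -1 placeholders.
 */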
static void ufshcd_add_command_trace(struct ufs_hba *hba,
                unsigned int tag, const char *str)
{
        sector_t lba = -1;
        u8 opcode = 0;
        u32 intr, doorbell;
        struct ufshcd_lrb *lrbp;
        int transfer_len = -1;

        /* trace UPIU also */
        ufshcd_add_cmd_upiu_trace(hba, tag, str);

        if (!trace_ufshcd_command_enabled())
                return;

        lrbp = &hba->lrb[tag];

        if (lrbp->cmd) { /* data phase exists */
                opcode = (u8)(*lrbp->cmd->cmnd);
                if ((opcode == READ_10) || (opcode == WRITE_10)) {
                        /*
                         * Currently we only fully trace read(10) and write(10)
                         * commands
                         */
                        if (lrbp->cmd->request && lrbp->cmd->request->bio)
                                lba =
                                  lrbp->cmd->request->bio->bi_iter.bi_sector;
                        transfer_len = be32_to_cpu(
                                lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
                }
        }

        intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
        doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        trace_ufshcd_command(dev_name(hba->dev), str, tag,
                                doorbell, transfer_len, intr, lba, opcode);
}

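/* Log the current frequency of every controller clock that has scaling limits */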
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;

        if (list_empty(head))
                return;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
                                clki->max_freq)
                        dev_err(hba->dev, "clk: %s, rate: %u\n",
                                        clki->name, clki->curr_freq);
        }
}

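/* Dump the non-empty entries of a UIC error history ring buffer with their timestamps */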
static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
                struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
        int i;

        for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
                int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;

                if (err_hist->reg[p] == 0)
                        continue;
                dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
                        err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
        }
}

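/* Dump the UFSHCI register space plus driver statistics and UIC error history */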
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
        /*
         * hex_dump reads its data without the readl macro. This might
         * cause inconsistency issues on some platforms, as the printed
         * values may be from cache and not the most recent value.
         * To know whether you are looking at an un-cached version, verify
         * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
         * during platform/pci probe function.
         */
        ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
        dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
                hba->ufs_version, hba->capabilities);
        dev_err(hba->dev,
                "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
                (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
        dev_err(hba->dev,
                "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
                ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
                hba->ufs_stats.hibern8_exit_cnt);

        ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
        ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
        ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
        ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
        ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

        ufshcd_print_clk_freqs(hba);

        if (hba->vops && hba->vops->dbg_register_dump)
                hba->vops->dbg_register_dump(hba);
}

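/*
 * Dump every outstanding transfer request selected by @bitmap: timestamps,
 * transfer request descriptor, request/response UPIUs and, optionally,
 * the PRDT entries.
 */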
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
        struct ufshcd_lrb *lrbp;
        int prdt_length;
        int tag;

        for_each_set_bit(tag, &bitmap, hba->nutrs) {
                lrbp = &hba->lrb[tag];

                dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
                                tag, ktime_to_us(lrbp->issue_time_stamp));
                dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
                                tag, ktime_to_us(lrbp->compl_time_stamp));
                dev_err(hba->dev,
                        "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
                        tag, (u64)lrbp->utrd_dma_addr);

                ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
                                sizeof(struct utp_transfer_req_desc));
                dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
                        (u64)lrbp->ucd_req_dma_addr);
                ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
                                sizeof(struct utp_upiu_req));
                dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
                        (u64)lrbp->ucd_rsp_dma_addr);
                ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
                                sizeof(struct utp_upiu_rsp));

                prdt_length = le16_to_cpu(
                        lrbp->utr_descriptor_ptr->prd_table_length);
                dev_err(hba->dev,
                        "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
                        tag, prdt_length,
                        (u64)lrbp->ucd_prdt_dma_addr);

                if (pr_prdt)
                        ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
                                sizeof(struct ufshcd_sg_entry) * prdt_length);
        }
}

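/* Dump the task management request descriptors selected by @bitmap */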
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
        struct utp_task_req_desc *tmrdp;
        int tag;

        for_each_set_bit(tag, &bitmap, hba->nutmrs) {
                tmrdp = &hba->utmrdl_base_addr[tag];
                dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
                ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
                                sizeof(struct request_desc_header));
                dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
                                tag);
                ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
                                sizeof(struct utp_upiu_req));
                dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
                                tag);
                ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
                                sizeof(struct utp_task_req_desc));
        }
}

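/* Dump the driver's software state: power mode, link state, error flags and quirks */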
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
        dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
        dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
                hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
        dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
                hba->saved_err, hba->saved_uic_err);
        dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
                hba->curr_dev_pwr_mode, hba->uic_link_state);
        dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
                hba->pm_op_in_progress, hba->is_sys_suspended);
        dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
                hba->auto_bkops_enabled, hba->host->host_self_blocked);
        dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
        dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
                hba->eh_flags, hba->req_abort_count);
        dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
                hba->capabilities, hba->caps);
        dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
                hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
        static const char * const names[] = {
                "INVALID MODE",
                "FAST MODE",
                "SLOW_MODE",
                "INVALID MODE",
                "FASTAUTO_MODE",
                "SLOWAUTO_MODE",
                "INVALID MODE",
        };

        dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
                 __func__,
                 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
                 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
                 names[hba->pwr_info.pwr_rx],
                 names[hba->pwr_info.pwr_tx],
                 hba->pwr_info.hs_rate);
}


/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
                                u32 val, unsigned long interval_us,
                                unsigned long timeout_ms, bool can_sleep)
{
        int err = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        /* ignore bits that we don't intend to wait on */
        val = val & mask;

        while ((ufshcd_readl(hba, reg) & mask) != val) {
                if (can_sleep)
                        usleep_range(interval_us, interval_us + 50);
                else
                        udelay(interval_us);
                if (time_after(jiffies, timeout)) {
                        if ((ufshcd_readl(hba, reg) & mask) != val)
                                err = -ETIMEDOUT;
                        break;
                }
        }

        return err;
}


/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
        u32 intr_mask = 0;

        switch (hba->ufs_version) {
        case UFSHCI_VERSION_10:
                intr_mask = INTERRUPT_MASK_ALL_VER_10;
                break;
        case UFSHCI_VERSION_11:
        case UFSHCI_VERSION_20:
                intr_mask = INTERRUPT_MASK_ALL_VER_11;
                break;
        case UFSHCI_VERSION_21:
        default:
                intr_mask = INTERRUPT_MASK_ALL_VER_21;
                break;
        }

        return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
                return ufshcd_vops_get_ufs_hci_version(hba);

        return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *                            the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
                                                DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
        return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
        int tag;
        bool ret = false;

        if (!free_slot)
                goto out;

        do {
                tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
                if (tag >= hba->nutmrs)
                        goto out;
        } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

        *free_slot = tag;
        ret = true;
out:
        return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
        clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
                ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
        else
                ufshcd_writel(hba, ~(1 << pos),
                                REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
                ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
        else
                ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
        __clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success and a positive value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
        return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
               MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *                              from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                        MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE |
                      INT_AGGR_COUNTER_AND_TIMER_RESET,
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
                      INT_AGGR_COUNTER_THLD_VAL(cnt) |
                      INT_AGGR_TIMEOUT_VAL(tmout),
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *                      When the run-stop registers are set to 1, the host
 *                      controller is informed that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
        ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TASK_REQ_LIST_RUN_STOP);
        ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
        ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
                ? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
        /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
        if ((hba->ufs_version == UFSHCI_VERSION_10) ||
            (hba->ufs_version == UFSHCI_VERSION_11))
                return UFS_UNIPRO_VER_1_41;
        else
                return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
        /*
         * If both host and device support UniPro ver1.6 or later, PA layer
         * parameters tuning happens during link startup itself.
         *
         * We can manually tune PA layer parameters if either host or device
         * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
         * logic simple, we will only do manual tuning if local unipro version
         * doesn't support ver1.6 or later.
         */
        if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
                return true;
        else
                return false;
}

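/*
 * Set every scalable clock to its max_freq (scale up) or min_freq (scale
 * down), with PRE_CHANGE/POST_CHANGE notifications to the variant driver.
 */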
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
        ktime_t start = ktime_get();
        bool clk_state_changed = false;

        if (list_empty(head))
                goto out;

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
        if (ret)
                return ret;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;

                                clk_state_changed = true;
                                ret = clk_set_rate(clki->clk, clki->max_freq);
                                if (ret) {
                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
                                                __func__, clki->name,
                                                clki->max_freq, ret);
                                        break;
                                }
                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
                                                "scaled up", clki->name,
                                                clki->curr_freq,
                                                clki->max_freq);

                                clki->curr_freq = clki->max_freq;

                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;

                                clk_state_changed = true;
                                ret = clk_set_rate(clki->clk, clki->min_freq);
                                if (ret) {
                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
                                                __func__, clki->name,
                                                clki->min_freq, ret);
                                        break;
                                }
                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
                                                "scaled down", clki->name,
                                                clki->curr_freq,
                                                clki->min_freq);
                                clki->curr_freq = clki->min_freq;
                        }
                }
                dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
                                clki->name, clk_get_rate(clki->clk));
        }

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
        if (clk_state_changed)
                trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
                        (scale_up ? "up" : "down"),
                        ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
                                               bool scale_up)
{
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;

        if (list_empty(head))
                return false;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;
                                return true;
                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;
                                return true;
                        }
                }
        }

        return false;
}

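/*
 * Wait (with scheduling) until both UTP doorbell registers are clear, or
 * return -EBUSY if @wait_timeout_us elapses or the HBA leaves the
 * operational state.
 */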
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
                                        u64 wait_timeout_us)
{
        unsigned long flags;
        int ret = 0;
        u32 tm_doorbell;
        u32 tr_doorbell;
        bool timeout = false, do_last_check = false;
        ktime_t start;

        ufshcd_hold(hba, false);
        spin_lock_irqsave(hba->host->host_lock, flags);
        /*
         * Wait for all the outstanding tasks/transfer requests.
         * Verify by checking the doorbell registers are clear.
         */
        start = ktime_get();
        do {
                if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
                        ret = -EBUSY;
                        goto out;
                }

                tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
                tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                if (!tm_doorbell && !tr_doorbell) {
                        timeout = false;
                        break;
                } else if (do_last_check) {
                        break;
                }

                spin_unlock_irqrestore(hba->host->host_lock, flags);
                schedule();
                if (ktime_to_us(ktime_sub(ktime_get(), start)) >
                    wait_timeout_us) {
                        timeout = true;
                        /*
                         * We might have scheduled out for a long time so make
                         * sure to check if the doorbells are cleared by this
                         * time or not.
                         */
                        do_last_check = true;
                }
                spin_lock_irqsave(hba->host->host_lock, flags);
        } while (tm_doorbell || tr_doorbell);

        if (timeout) {
                dev_err(hba->dev,
                        "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
                        __func__, tm_doorbell, tr_doorbell);
                ret = -EBUSY;
        }
out:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        ufshcd_release(hba);
        return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
        #define UFS_MIN_GEAR_TO_SCALE_DOWN      UFS_HS_G1
        int ret = 0;
        struct ufs_pa_layer_attr new_pwr_info;

        if (scale_up) {
                memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
                       sizeof(struct ufs_pa_layer_attr));
        } else {
                memcpy(&new_pwr_info, &hba->pwr_info,
                       sizeof(struct ufs_pa_layer_attr));

                if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
                    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
                        /* save the current power mode */
                        memcpy(&hba->clk_scaling.saved_pwr_info.info,
                                &hba->pwr_info,
                                sizeof(struct ufs_pa_layer_attr));

                        /* scale down gear */
                        new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
                        new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
                }
        }

        /* check if the power mode needs to be changed or not */
        ret = ufshcd_change_power_mode(hba, &new_pwr_info);

        if (ret)
                dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
                        __func__, ret,
                        hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
                        new_pwr_info.gear_tx, new_pwr_info.gear_rx);

        return ret;
}

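/*
 * Block SCSI requests and take clk_scaling_lock for writing; fails with
 * -EBUSY (and undoes both) if the doorbells do not drain within 1 second.
 */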
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
        #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
        int ret = 0;
        /*
         * make sure that there are no outstanding requests when
         * clock scaling is in progress
         */
        ufshcd_scsi_block_requests(hba);
        down_write(&hba->clk_scaling_lock);
        if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
                ret = -EBUSY;
                up_write(&hba->clk_scaling_lock);
                ufshcd_scsi_unblock_requests(hba);
        }

        return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
        up_write(&hba->clk_scaling_lock);
        ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;

        /* let's not get into low power until clock scaling is completed */
        ufshcd_hold(hba, false);

        ret = ufshcd_clock_scaling_prepare(hba);
        if (ret)
                return ret;

        /* scale down the gear before scaling down clocks */
        if (!scale_up) {
                ret = ufshcd_scale_gear(hba, false);
                if (ret)
                        goto out;
        }

        ret = ufshcd_scale_clks(hba, scale_up);
        if (ret) {
                if (!scale_up)
                        ufshcd_scale_gear(hba, true);
                goto out;
        }

        /* scale up the gear after scaling up clocks */
        if (scale_up) {
                ret = ufshcd_scale_gear(hba, true);
                if (ret) {
                        ufshcd_scale_clks(hba, false);
                        goto out;
                }
        }

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
        ufshcd_clock_scaling_unprepare(hba);
        ufshcd_release(hba);
        return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.suspend_work);
        unsigned long irq_flags;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return;
        }
        hba->clk_scaling.is_suspended = true;
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        __ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.resume_work);
        unsigned long irq_flags;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (!hba->clk_scaling.is_suspended) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return;
        }
        hba->clk_scaling.is_suspended = false;
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        devfreq_resume_device(hba->devfreq);
}

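/*
 * devfreq target callback: translate the requested frequency into a scale
 * up/down decision and schedule the clock-scaling suspend work when idle.
 */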
static int ufshcd_devfreq_target(struct device *dev,
                                unsigned long *freq, u32 flags)
{
        int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
        ktime_t start;
        bool scale_up, sched_clk_scaling_suspend_work = false;
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        unsigned long irq_flags;

        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return 0;
        }

        if (!hba->clk_scaling.active_reqs)
                sched_clk_scaling_suspend_work = true;

        if (list_empty(clk_list)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                goto out;
        }

        clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
        scale_up = (*freq == clki->max_freq) ? true : false;
        if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                ret = 0;
                goto out; /* no state change required */
        }
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        start = ktime_get();
        ret = ufshcd_devfreq_scale(hba, scale_up);

        trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
                (scale_up ? "up" : "down"),
                ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
        if (sched_clk_scaling_suspend_work)
                queue_work(hba->clk_scaling.workq,
                           &hba->clk_scaling.suspend_work);

        return ret;
}


static int ufshcd_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *stat)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;

        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;

        memset(stat, 0, sizeof(*stat));

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (!scaling->window_start_t)
                goto start_window;

        if (scaling->is_busy_started)
                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
                                        scaling->busy_start_t));

        stat->total_time = jiffies_to_usecs((long)jiffies -
                                (long)scaling->window_start_t);
        stat->busy_time = scaling->tot_busy_t;
start_window:
        scaling->window_start_t = jiffies;
        scaling->tot_busy_t = 0;

        if (hba->outstanding_reqs) {
                scaling->busy_start_t = ktime_get();
                scaling->is_busy_started = true;
        } else {
                scaling->busy_start_t = 0;
                scaling->is_busy_started = false;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
        .polling_ms     = 100,
        .target         = ufshcd_devfreq_target,
        .get_dev_status = ufshcd_devfreq_get_dev_status,
};

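/*
 * Register the HBA with the devfreq framework using the simple_ondemand
 * governor; the first clock in clk_list_head provides the min/max OPPs.
 */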
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        struct devfreq *devfreq;
        int ret;

        /* Skip devfreq if we don't have any clocks in the list */
        if (list_empty(clk_list))
                return 0;

        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
        dev_pm_opp_add(hba->dev, clki->min_freq, 0);
        dev_pm_opp_add(hba->dev, clki->max_freq, 0);

        devfreq = devfreq_add_device(hba->dev,
                        &ufs_devfreq_profile,
                        DEVFREQ_GOV_SIMPLE_ONDEMAND,
                        NULL);
        if (IS_ERR(devfreq)) {
                ret = PTR_ERR(devfreq);
                dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

                dev_pm_opp_remove(hba->dev, clki->min_freq);
                dev_pm_opp_remove(hba->dev, clki->max_freq);
                return ret;
        }

        hba->devfreq = devfreq;

        return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;

        if (!hba->devfreq)
                return;

        devfreq_remove_device(hba->devfreq);
        hba->devfreq = NULL;

        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
        dev_pm_opp_remove(hba->dev, clki->min_freq);
        dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;

        devfreq_suspend_device(hba->devfreq);
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->clk_scaling.window_start_t = 0;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;
        bool suspend = false;

        if (!ufshcd_is_clkscaling_supported(hba))
                return;

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (!hba->clk_scaling.is_suspended) {
                suspend = true;
                hba->clk_scaling.is_suspended = true;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (suspend)
                __ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;
        bool resume = false;

        if (!ufshcd_is_clkscaling_supported(hba))
                return;

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->clk_scaling.is_suspended) {
                resume = true;
                hba->clk_scaling.is_suspended = false;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (resume)
                devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        u32 value;
        int err;

        if (kstrtou32(buf, 0, &value))
                return -EINVAL;

        value = !!value;
        if (value == hba->clk_scaling.is_allowed)
                goto out;

        pm_runtime_get_sync(hba->dev);
        ufshcd_hold(hba, false);

        cancel_work_sync(&hba->clk_scaling.suspend_work);
        cancel_work_sync(&hba->clk_scaling.resume_work);

        hba->clk_scaling.is_allowed = value;

        if (value) {
                ufshcd_resume_clkscaling(hba);
        } else {
                ufshcd_suspend_clkscaling(hba);
                err = ufshcd_devfreq_scale(hba, true);
                if (err)
                        dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
                                        __func__, err);
        }

        ufshcd_release(hba);
        pm_runtime_put_sync(hba->dev);
out:
        return count;
}

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
        hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
        hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
        sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
        hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
        hba->clk_scaling.enable_attr.attr.mode = 0644;
        if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
                dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}

1474static void ufshcd_ungate_work(struct work_struct *work)
1475{
1476        int ret;
1477        unsigned long flags;
1478        struct ufs_hba *hba = container_of(work, struct ufs_hba,
1479                        clk_gating.ungate_work);
1480
1481        cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1482
1483        spin_lock_irqsave(hba->host->host_lock, flags);
1484        if (hba->clk_gating.state == CLKS_ON) {
1485                spin_unlock_irqrestore(hba->host->host_lock, flags);
1486                goto unblock_reqs;
1487        }
1488
1489        spin_unlock_irqrestore(hba->host->host_lock, flags);
1490        ufshcd_setup_clocks(hba, true);
1491
1492        /* Exit from hibern8 */
1493        if (ufshcd_can_hibern8_during_gating(hba)) {
1494                /* Prevent gating in this path */
1495                hba->clk_gating.is_suspended = true;
1496                if (ufshcd_is_link_hibern8(hba)) {
1497                        ret = ufshcd_uic_hibern8_exit(hba);
1498                        if (ret)
1499                                dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1500                                        __func__, ret);
1501                        else
1502                                ufshcd_set_link_active(hba);
1503                }
1504                hba->clk_gating.is_suspended = false;
1505        }
1506unblock_reqs:
1507        ufshcd_scsi_unblock_requests(hba);
1508}
1509
1510/**
1511 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1512 * Also, exit from hibern8 mode and set the link as active.
1513 * @hba: per adapter instance
1514 * @async: This indicates whether caller should ungate clocks asynchronously.
1515 */
1516int ufshcd_hold(struct ufs_hba *hba, bool async)
1517{
1518        int rc = 0;
1519        unsigned long flags;
1520
1521        if (!ufshcd_is_clkgating_allowed(hba))
1522                goto out;
1523        spin_lock_irqsave(hba->host->host_lock, flags);
1524        hba->clk_gating.active_reqs++;
1525
1526        if (ufshcd_eh_in_progress(hba)) {
1527                spin_unlock_irqrestore(hba->host->host_lock, flags);
1528                return 0;
1529        }
1530
1531start:
1532        switch (hba->clk_gating.state) {
1533        case CLKS_ON:
1534                /*
1535                 * Wait for the ungate work to complete if in progress.
1536                 * Though the clocks may be in ON state, the link could
1537                 * still be in hibern8 state if hibern8 is allowed
1538                 * during clock gating.
1539                 * Make sure we exit hibern8 state also in addition to
1540                 * clocks being ON.
1541                 */
1542                if (ufshcd_can_hibern8_during_gating(hba) &&
1543                    ufshcd_is_link_hibern8(hba)) {
1544                        spin_unlock_irqrestore(hba->host->host_lock, flags);
1545                        flush_work(&hba->clk_gating.ungate_work);
1546                        spin_lock_irqsave(hba->host->host_lock, flags);
1547                        goto start;
1548                }
1549                break;
1550        case REQ_CLKS_OFF:
1551                if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1552                        hba->clk_gating.state = CLKS_ON;
1553                        trace_ufshcd_clk_gating(dev_name(hba->dev),
1554                                                hba->clk_gating.state);
1555                        break;
1556                }
1557                /*
1558                 * If we are here, it means gating work is either done or
1559                 * currently running. Hence, fall through to cancel gating
1560                 * work and to enable clocks.
1561                 */
1562        case CLKS_OFF:
1563                ufshcd_scsi_block_requests(hba);
1564                hba->clk_gating.state = REQ_CLKS_ON;
1565                trace_ufshcd_clk_gating(dev_name(hba->dev),
1566                                        hba->clk_gating.state);
1567                queue_work(hba->clk_gating.clk_gating_workq,
1568                           &hba->clk_gating.ungate_work);
1569                /*
1570                 * fall through to check if we should wait for this
1571                 * work to be done or not.
1572                 */
1573        case REQ_CLKS_ON:
1574                if (async) {
1575                        rc = -EAGAIN;
1576                        hba->clk_gating.active_reqs--;
1577                        break;
1578                }
1579
1580                spin_unlock_irqrestore(hba->host->host_lock, flags);
1581                flush_work(&hba->clk_gating.ungate_work);
1582                /* Make sure state is CLKS_ON before returning */
1583                spin_lock_irqsave(hba->host->host_lock, flags);
1584                goto start;
1585        default:
1586                dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1587                                __func__, hba->clk_gating.state);
1588                break;
1589        }
1590        spin_unlock_irqrestore(hba->host->host_lock, flags);
1591out:
1592        return rc;
1593}
1594EXPORT_SYMBOL_GPL(ufshcd_hold);
1595
1596static void ufshcd_gate_work(struct work_struct *work)
1597{
1598        struct ufs_hba *hba = container_of(work, struct ufs_hba,
1599                        clk_gating.gate_work.work);
1600        unsigned long flags;
1601
1602        spin_lock_irqsave(hba->host->host_lock, flags);
1603        /*
1604         * If a request to keep the clocks on arrived while this work
1605         * was pending, the gating state will already be REQ_CLKS_ON. In
1606         * that case save time by skipping the gating work and exiting
1607         * after changing the clock state to CLKS_ON.
1608         */
1609        if (hba->clk_gating.is_suspended ||
1610                (hba->clk_gating.state == REQ_CLKS_ON)) {
1611                hba->clk_gating.state = CLKS_ON;
1612                trace_ufshcd_clk_gating(dev_name(hba->dev),
1613                                        hba->clk_gating.state);
1614                goto rel_lock;
1615        }
1616
1617        if (hba->clk_gating.active_reqs
1618                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1619                || hba->lrb_in_use || hba->outstanding_tasks
1620                || hba->active_uic_cmd || hba->uic_async_done)
1621                goto rel_lock;
1622
1623        spin_unlock_irqrestore(hba->host->host_lock, flags);
1624
1625        /* put the link into hibern8 mode before turning off clocks */
1626        if (ufshcd_can_hibern8_during_gating(hba)) {
1627                if (ufshcd_uic_hibern8_enter(hba)) {
1628                        hba->clk_gating.state = CLKS_ON;
1629                        trace_ufshcd_clk_gating(dev_name(hba->dev),
1630                                                hba->clk_gating.state);
1631                        goto out;
1632                }
1633                ufshcd_set_link_hibern8(hba);
1634        }
1635
1636        if (!ufshcd_is_link_active(hba))
1637                ufshcd_setup_clocks(hba, false);
1638        else
1639                /* If link is active, device ref_clk can't be switched off */
1640                __ufshcd_setup_clocks(hba, false, true);
1641
1642        /*
1643         * If a request to cancel this work arrived in the meantime, the
1644         * gating state will have been set to REQ_CLKS_ON. In that case
1645         * keep the state as REQ_CLKS_ON, which still implies that clocks
1646         * are off and a request to turn them on is pending. Done this
1647         * way, the state machine stays intact and the cancel work is
1648         * ultimately prevented from running multiple times when new
1649         * requests arrive before the current cancel work is done.
1650         */
1651        spin_lock_irqsave(hba->host->host_lock, flags);
1652        if (hba->clk_gating.state == REQ_CLKS_OFF) {
1653                hba->clk_gating.state = CLKS_OFF;
1654                trace_ufshcd_clk_gating(dev_name(hba->dev),
1655                                        hba->clk_gating.state);
1656        }
1657rel_lock:
1658        spin_unlock_irqrestore(hba->host->host_lock, flags);
1659out:
1660        return;
1661}
1662
1663/* host lock must be held before calling this variant */
1664static void __ufshcd_release(struct ufs_hba *hba)
1665{
1666        if (!ufshcd_is_clkgating_allowed(hba))
1667                return;
1668
1669        hba->clk_gating.active_reqs--;
1670
1671        if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1672                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1673                || hba->lrb_in_use || hba->outstanding_tasks
1674                || hba->active_uic_cmd || hba->uic_async_done
1675                || ufshcd_eh_in_progress(hba))
1676                return;
1677
1678        hba->clk_gating.state = REQ_CLKS_OFF;
1679        trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1680        schedule_delayed_work(&hba->clk_gating.gate_work,
1681                        msecs_to_jiffies(hba->clk_gating.delay_ms));
1682}
1683
1684void ufshcd_release(struct ufs_hba *hba)
1685{
1686        unsigned long flags;
1687
1688        spin_lock_irqsave(hba->host->host_lock, flags);
1689        __ufshcd_release(hba);
1690        spin_unlock_irqrestore(hba->host->host_lock, flags);
1691}
1692EXPORT_SYMBOL_GPL(ufshcd_release);
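
/*
 * Usage sketch (illustrative only, not part of this driver): code that
 * needs the host clocks on, e.g. a vendor hook peeking at a host register,
 * is expected to bracket the access with ufshcd_hold()/ufshcd_release() so
 * that gate_work cannot switch the clocks off underneath it. Passing
 * async=false makes ufshcd_hold() block until the ungate work has run;
 * the wrapper name below is hypothetical.
 *
 *	static int example_read_hcs(struct ufs_hba *hba, u32 *hcs)
 *	{
 *		int err;
 *
 *		err = ufshcd_hold(hba, false);
 *		if (err)
 *			return err;
 *		*hcs = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 *		ufshcd_release(hba);
 *		return 0;
 *	}
 */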
1693
1694static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1695                struct device_attribute *attr, char *buf)
1696{
1697        struct ufs_hba *hba = dev_get_drvdata(dev);
1698
1699        return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1700}
1701
1702static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1703                struct device_attribute *attr, const char *buf, size_t count)
1704{
1705        struct ufs_hba *hba = dev_get_drvdata(dev);
1706        unsigned long flags, value;
1707
1708        if (kstrtoul(buf, 0, &value))
1709                return -EINVAL;
1710
1711        spin_lock_irqsave(hba->host->host_lock, flags);
1712        hba->clk_gating.delay_ms = value;
1713        spin_unlock_irqrestore(hba->host->host_lock, flags);
1714        return count;
1715}
1716
1717static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1718                struct device_attribute *attr, char *buf)
1719{
1720        struct ufs_hba *hba = dev_get_drvdata(dev);
1721
1722        return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1723}
1724
1725static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1726                struct device_attribute *attr, const char *buf, size_t count)
1727{
1728        struct ufs_hba *hba = dev_get_drvdata(dev);
1729        unsigned long flags;
1730        u32 value;
1731
1732        if (kstrtou32(buf, 0, &value))
1733                return -EINVAL;
1734
1735        value = !!value;
1736        if (value == hba->clk_gating.is_enabled)
1737                goto out;
1738
1739        if (value) {
1740                ufshcd_release(hba);
1741        } else {
1742                spin_lock_irqsave(hba->host->host_lock, flags);
1743                hba->clk_gating.active_reqs++;
1744                spin_unlock_irqrestore(hba->host->host_lock, flags);
1745        }
1746
1747        hba->clk_gating.is_enabled = value;
1748out:
1749        return count;
1750}
1751
1752static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1753{
1754        char wq_name[sizeof("ufs_clk_gating_00")];
1755
1756        if (!ufshcd_is_clkgating_allowed(hba))
1757                return;
1758
1759        hba->clk_gating.delay_ms = 150;
1760        INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1761        INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1762
1763        snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1764                 hba->host->host_no);
1765        hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1766                                                           WQ_MEM_RECLAIM);
1767
1768        hba->clk_gating.is_enabled = true;
1769
1770        hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1771        hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1772        sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1773        hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1774        hba->clk_gating.delay_attr.attr.mode = 0644;
1775        if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1776                dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1777
1778        hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1779        hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1780        sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1781        hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1782        hba->clk_gating.enable_attr.attr.mode = 0644;
1783        if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1784                dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1785}
1786
1787static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1788{
1789        if (!ufshcd_is_clkgating_allowed(hba))
1790                return;
1791        device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1792        device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1793        cancel_work_sync(&hba->clk_gating.ungate_work);
1794        cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1795        destroy_workqueue(hba->clk_gating.clk_gating_workq);
1796}
1797
1798/* Must be called with host lock acquired */
1799static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1800{
1801        bool queue_resume_work = false;
1802
1803        if (!ufshcd_is_clkscaling_supported(hba))
1804                return;
1805
1806        if (!hba->clk_scaling.active_reqs++)
1807                queue_resume_work = true;
1808
1809        if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1810                return;
1811
1812        if (queue_resume_work)
1813                queue_work(hba->clk_scaling.workq,
1814                           &hba->clk_scaling.resume_work);
1815
1816        if (!hba->clk_scaling.window_start_t) {
1817                hba->clk_scaling.window_start_t = jiffies;
1818                hba->clk_scaling.tot_busy_t = 0;
1819                hba->clk_scaling.is_busy_started = false;
1820        }
1821
1822        if (!hba->clk_scaling.is_busy_started) {
1823                hba->clk_scaling.busy_start_t = ktime_get();
1824                hba->clk_scaling.is_busy_started = true;
1825        }
1826}
1827
1828static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1829{
1830        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1831
1832        if (!ufshcd_is_clkscaling_supported(hba))
1833                return;
1834
1835        if (!hba->outstanding_reqs && scaling->is_busy_started) {
1836                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1837                                        scaling->busy_start_t));
1838                scaling->busy_start_t = 0;
1839                scaling->is_busy_started = false;
1840        }
1841}
1842/**
1843 * ufshcd_send_command - Send SCSI or device management commands
1844 * @hba: per adapter instance
1845 * @task_tag: Task tag of the command
1846 */
1847static inline
1848void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1849{
1850        hba->lrb[task_tag].issue_time_stamp = ktime_get();
1851        hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1852        ufshcd_clk_scaling_start_busy(hba);
1853        __set_bit(task_tag, &hba->outstanding_reqs);
1854        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1855        /* Make sure that doorbell is committed immediately */
1856        wmb();
1857        ufshcd_add_command_trace(hba, task_tag, "send");
1858}
1859
1860/**
1861 * ufshcd_copy_sense_data - Copy sense data in case of check condition
1862 * @lrbp: pointer to local reference block
1863 */
1864static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1865{
1866        int len;
1867        if (lrbp->sense_buffer &&
1868            ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1869                int len_to_copy;
1870
1871                len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1872                len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1873
1874                memcpy(lrbp->sense_buffer,
1875                        lrbp->ucd_rsp_ptr->sr.sense_data,
1876                        min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1877        }
1878}
1879
1880/**
1881 * ufshcd_copy_query_response() - Copy the Query Response and the data
1882 * descriptor
1883 * @hba: per adapter instance
1884 * @lrbp: pointer to local reference block
1885 */
1886static
1887int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1888{
1889        struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1890
1891        memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1892
1893        /* Get the descriptor */
1894        if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1895                u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1896                                GENERAL_UPIU_REQUEST_SIZE;
1897                u16 resp_len;
1898                u16 buf_len;
1899
1900                /* data segment length */
1901                resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1902                                                MASK_QUERY_DATA_SEG_LEN;
1903                buf_len = be16_to_cpu(
1904                                hba->dev_cmd.query.request.upiu_req.length);
1905                if (likely(buf_len >= resp_len)) {
1906                        memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1907                } else {
1908                        dev_warn(hba->dev,
1909                                "%s: Response size is bigger than buffer\n",
1910                                __func__);
1911                        return -EINVAL;
1912                }
1913        }
1914
1915        return 0;
1916}
1917
1918/**
1919 * ufshcd_hba_capabilities - Read controller capabilities
1920 * @hba: per adapter instance
1921 */
1922static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1923{
1924        hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1925
1926        /* nutrs and nutmrs are 0 based values */
1927        hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1928        hba->nutmrs =
1929        ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1930}
1931
1932/**
1933 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1934 *                            to accept UIC commands
1935 * @hba: per adapter instance
1936 * Return true on success, else false
1937 */
1938static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1939{
1940        if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1941                return true;
1942        else
1943                return false;
1944}
1945
1946/**
1947 * ufshcd_get_upmcrs - Get the power mode change request status
1948 * @hba: Pointer to adapter instance
1949 *
1950 * This function gets the UPMCRS field of HCS register
1951 * Returns value of UPMCRS field
1952 */
1953static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1954{
1955        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1956}
1957
1958/**
1959 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1960 * @hba: per adapter instance
1961 * @uic_cmd: UIC command
1962 *
1963 * Mutex must be held.
1964 */
1965static inline void
1966ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1967{
1968        WARN_ON(hba->active_uic_cmd);
1969
1970        hba->active_uic_cmd = uic_cmd;
1971
1972        /* Write Args */
1973        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
1974        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
1975        ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
1976
1977        /* Write UIC Cmd */
1978        ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
1979                      REG_UIC_COMMAND);
1980}
1981
1982/**
1983 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
1984 * @hba: per adapter instance
1985 * @uic_cmd: UIC command
1986 *
1987 * Must be called with mutex held.
1988 * Returns 0 only if success.
1989 */
1990static int
1991ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1992{
1993        int ret;
1994        unsigned long flags;
1995
1996        if (wait_for_completion_timeout(&uic_cmd->done,
1997                                        msecs_to_jiffies(UIC_CMD_TIMEOUT)))
1998                ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
1999        else
2000                ret = -ETIMEDOUT;
2001
2002        spin_lock_irqsave(hba->host->host_lock, flags);
2003        hba->active_uic_cmd = NULL;
2004        spin_unlock_irqrestore(hba->host->host_lock, flags);
2005
2006        return ret;
2007}
2008
2009/**
2010 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2011 * @hba: per adapter instance
2012 * @uic_cmd: UIC command
2013 * @completion: initialize the completion only if this is set to true
2014 *
2015 * Identical to ufshcd_send_uic_cmd() except for locking: must be called
2016 * with the UIC command mutex held and host_lock locked.
2017 * Returns 0 only if success.
2018 */
2019static int
2020__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2021                      bool completion)
2022{
2023        if (!ufshcd_ready_for_uic_cmd(hba)) {
2024                dev_err(hba->dev,
2025                        "Controller not ready to accept UIC commands\n");
2026                return -EIO;
2027        }
2028
2029        if (completion)
2030                init_completion(&uic_cmd->done);
2031
2032        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2033
2034        return 0;
2035}
2036
2037/**
2038 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2039 * @hba: per adapter instance
2040 * @uic_cmd: UIC command
2041 *
2042 * Returns 0 only if success.
2043 */
2044static int
2045ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2046{
2047        int ret;
2048        unsigned long flags;
2049
2050        ufshcd_hold(hba, false);
2051        mutex_lock(&hba->uic_cmd_mutex);
2052        ufshcd_add_delay_before_dme_cmd(hba);
2053
2054        spin_lock_irqsave(hba->host->host_lock, flags);
2055        ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2056        spin_unlock_irqrestore(hba->host->host_lock, flags);
2057        if (!ret)
2058                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2059
2060        mutex_unlock(&hba->uic_cmd_mutex);
2061
2062        ufshcd_release(hba);
2063        return ret;
2064}
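
/*
 * Illustrative sketch of how the DME helpers elsewhere in this file drive
 * ufshcd_send_uic_cmd(): fill a struct uic_command and let the function
 * serialize it on uic_cmd_mutex and wait for completion. UIC_CMD_DME_GET,
 * UIC_ARG_MIB() and PA_AVAILTXDATALANES come from the UFSHCI/UniPro
 * headers; the snippet itself is only an example.
 *
 *	struct uic_command uic_cmd = {0};
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(PA_AVAILTXDATALANES);
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		attr_val = uic_cmd.argument3;
 */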
2065
2066/**
2067 * ufshcd_map_sg - Map scatter-gather list to prdt
2068 * @hba: per adapter instance
2069 * @lrbp: pointer to local reference block
2070 *
2071 * Returns 0 in case of success, non-zero value in case of failure
2072 */
2073static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2074{
2075        struct ufshcd_sg_entry *prd_table;
2076        struct scatterlist *sg;
2077        struct scsi_cmnd *cmd;
2078        int sg_segments;
2079        int i;
2080
2081        cmd = lrbp->cmd;
2082        sg_segments = scsi_dma_map(cmd);
2083        if (sg_segments < 0)
2084                return sg_segments;
2085
2086        if (sg_segments) {
2087                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2088                        lrbp->utr_descriptor_ptr->prd_table_length =
2089                                cpu_to_le16((u16)(sg_segments *
2090                                        sizeof(struct ufshcd_sg_entry)));
2091                else
2092                        lrbp->utr_descriptor_ptr->prd_table_length =
2093                                cpu_to_le16((u16) (sg_segments));
2094
2095                prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2096
2097                scsi_for_each_sg(cmd, sg, sg_segments, i) {
2098                        prd_table[i].size  =
2099                                cpu_to_le32(((u32) sg_dma_len(sg))-1);
2100                        prd_table[i].base_addr =
2101                                cpu_to_le32(lower_32_bits(sg->dma_address));
2102                        prd_table[i].upper_addr =
2103                                cpu_to_le32(upper_32_bits(sg->dma_address));
2104                        prd_table[i].reserved = 0;
2105                }
2106        } else {
2107                lrbp->utr_descriptor_ptr->prd_table_length = 0;
2108        }
2109
2110        return 0;
2111}
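
/*
 * Note on the PRDT size encoding above: the data byte count field is
 * zero-based, so each segment length is stored as (length - 1). For
 * example (illustrative arithmetic only), a 4 KiB scatterlist segment is
 * recorded as 4096 - 1 = 4095 (0xFFF).
 */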
2112
2113/**
2114 * ufshcd_enable_intr - enable interrupts
2115 * @hba: per adapter instance
2116 * @intrs: interrupt bits
2117 */
2118static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2119{
2120        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2121
2122        if (hba->ufs_version == UFSHCI_VERSION_10) {
2123                u32 rw;
2124                rw = set & INTERRUPT_MASK_RW_VER_10;
2125                set = rw | ((set ^ intrs) & intrs);
2126        } else {
2127                set |= intrs;
2128        }
2129
2130        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2131}
2132
2133/**
2134 * ufshcd_disable_intr - disable interrupts
2135 * @hba: per adapter instance
2136 * @intrs: interrupt bits
2137 */
2138static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2139{
2140        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2141
2142        if (hba->ufs_version == UFSHCI_VERSION_10) {
2143                u32 rw;
2144                rw = (set & INTERRUPT_MASK_RW_VER_10) &
2145                        ~(intrs & INTERRUPT_MASK_RW_VER_10);
2146                set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2147
2148        } else {
2149                set &= ~intrs;
2150        }
2151
2152        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2153}
2154
2155/**
2156 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
2157 * descriptor according to the request
2158 * @lrbp: pointer to local reference block
2159 * @upiu_flags: flags required in the header
2160 * @cmd_dir: request's data direction
2161 */
2162static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2163                        u32 *upiu_flags, enum dma_data_direction cmd_dir)
2164{
2165        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2166        u32 data_direction;
2167        u32 dword_0;
2168
2169        if (cmd_dir == DMA_FROM_DEVICE) {
2170                data_direction = UTP_DEVICE_TO_HOST;
2171                *upiu_flags = UPIU_CMD_FLAGS_READ;
2172        } else if (cmd_dir == DMA_TO_DEVICE) {
2173                data_direction = UTP_HOST_TO_DEVICE;
2174                *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2175        } else {
2176                data_direction = UTP_NO_DATA_TRANSFER;
2177                *upiu_flags = UPIU_CMD_FLAGS_NONE;
2178        }
2179
2180        dword_0 = data_direction | (lrbp->command_type
2181                                << UPIU_COMMAND_TYPE_OFFSET);
2182        if (lrbp->intr_cmd)
2183                dword_0 |= UTP_REQ_DESC_INT_CMD;
2184
2185        /* Transfer request descriptor header fields */
2186        req_desc->header.dword_0 = cpu_to_le32(dword_0);
2187        /* dword_1 is reserved, hence it is set to 0 */
2188        req_desc->header.dword_1 = 0;
2189        /*
2190         * Assign an invalid value to the command status. The controller
2191         * overwrites OCS with the actual command status when the command
2192         * completes.
2193         */
2194        req_desc->header.dword_2 =
2195                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2196        /* dword_3 is reserved, hence it is set to 0 */
2197        req_desc->header.dword_3 = 0;
2198
2199        req_desc->prd_table_length = 0;
2200}
2201
2202/**
2203 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2204 * for scsi commands
2205 * @lrbp: local reference block pointer
2206 * @upiu_flags: flags
2207 */
2208static
2209void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2210{
2211        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2212        unsigned short cdb_len;
2213
2214        /* command descriptor fields */
2215        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2216                                UPIU_TRANSACTION_COMMAND, upiu_flags,
2217                                lrbp->lun, lrbp->task_tag);
2218        ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2219                                UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2220
2221        /* Total EHS length and Data segment length will be zero */
2222        ucd_req_ptr->header.dword_2 = 0;
2223
2224        ucd_req_ptr->sc.exp_data_transfer_len =
2225                cpu_to_be32(lrbp->cmd->sdb.length);
2226
2227        cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2228        memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2229        memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2230
2231        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2232}
2233
2234/**
2235 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2236 * for query requests
2237 * @hba: UFS hba
2238 * @lrbp: local reference block pointer
2239 * @upiu_flags: flags
2240 */
2241static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2242                                struct ufshcd_lrb *lrbp, u32 upiu_flags)
2243{
2244        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2245        struct ufs_query *query = &hba->dev_cmd.query;
2246        u16 len = be16_to_cpu(query->request.upiu_req.length);
2247        u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2248
2249        /* Query request header */
2250        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2251                        UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2252                        lrbp->lun, lrbp->task_tag);
2253        ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2254                        0, query->request.query_func, 0, 0);
2255
2256        /* Data segment length only need for WRITE_DESC */
2257        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2258                ucd_req_ptr->header.dword_2 =
2259                        UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2260        else
2261                ucd_req_ptr->header.dword_2 = 0;
2262
2263        /* Copy the Query Request buffer as is */
2264        memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2265                        QUERY_OSF_SIZE);
2266
2267        /* Copy the Descriptor */
2268        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2269                memcpy(descp, query->descriptor, len);
2270
2271        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2272}
2273
2274static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2275{
2276        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2277
2278        memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2279
2280        /* command descriptor fields */
2281        ucd_req_ptr->header.dword_0 =
2282                UPIU_HEADER_DWORD(
2283                        UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2284        /* clear rest of the fields of basic header */
2285        ucd_req_ptr->header.dword_1 = 0;
2286        ucd_req_ptr->header.dword_2 = 0;
2287
2288        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2289}
2290
2291/**
2292 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
2293 *                           for Device Management Purposes
2294 * @hba: per adapter instance
2295 * @lrbp: pointer to local reference block
2296 */
2297static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2298{
2299        u32 upiu_flags;
2300        int ret = 0;
2301
2302        if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2303            (hba->ufs_version == UFSHCI_VERSION_11))
2304                lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2305        else
2306                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2307
2308        ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2309        if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2310                ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2311        else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2312                ufshcd_prepare_utp_nop_upiu(lrbp);
2313        else
2314                ret = -EINVAL;
2315
2316        return ret;
2317}
2318
2319/**
2320 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2321 *                         for SCSI Purposes
2322 * @hba: per adapter instance
2323 * @lrbp: pointer to local reference block
2324 */
2325static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2326{
2327        u32 upiu_flags;
2328        int ret = 0;
2329
2330        if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2331            (hba->ufs_version == UFSHCI_VERSION_11))
2332                lrbp->command_type = UTP_CMD_TYPE_SCSI;
2333        else
2334                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2335
2336        if (likely(lrbp->cmd)) {
2337                ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2338                                                lrbp->cmd->sc_data_direction);
2339                ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2340        } else {
2341                ret = -EINVAL;
2342        }
2343
2344        return ret;
2345}
2346
2347/**
2348 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2349 * @upiu_wlun_id: UPIU W-LUN id
2350 *
2351 * Returns SCSI W-LUN id
2352 */
2353static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2354{
2355        return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2356}
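
/*
 * Worked example for the mapping above (constant values taken from the
 * UFS/SCSI headers, shown only for illustration): with UFS_UPIU_WLUN_ID
 * equal to 0x80 and SCSI_W_LUN_BASE equal to 0xC100, the UPIU REPORT LUNS
 * W-LUN 0x81 maps to (0x81 & ~0x80) | 0xC100 = 0xC101.
 */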
2357
2358/**
2359 * ufshcd_queuecommand - main entry point for SCSI requests
2360 * @host: SCSI host pointer
2361 * @cmd: command from SCSI Midlayer
2362 *
2363 * Returns 0 for success, non-zero in case of failure
2364 */
2365static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2366{
2367        struct ufshcd_lrb *lrbp;
2368        struct ufs_hba *hba;
2369        unsigned long flags;
2370        int tag;
2371        int err = 0;
2372
2373        hba = shost_priv(host);
2374
2375        tag = cmd->request->tag;
2376        if (!ufshcd_valid_tag(hba, tag)) {
2377                dev_err(hba->dev,
2378                        "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2379                        __func__, tag, cmd, cmd->request);
2380                BUG();
2381        }
2382
2383        if (!down_read_trylock(&hba->clk_scaling_lock))
2384                return SCSI_MLQUEUE_HOST_BUSY;
2385
2386        spin_lock_irqsave(hba->host->host_lock, flags);
2387        switch (hba->ufshcd_state) {
2388        case UFSHCD_STATE_OPERATIONAL:
2389                break;
2390        case UFSHCD_STATE_EH_SCHEDULED:
2391        case UFSHCD_STATE_RESET:
2392                err = SCSI_MLQUEUE_HOST_BUSY;
2393                goto out_unlock;
2394        case UFSHCD_STATE_ERROR:
2395                set_host_byte(cmd, DID_ERROR);
2396                cmd->scsi_done(cmd);
2397                goto out_unlock;
2398        default:
2399                dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2400                                __func__, hba->ufshcd_state);
2401                set_host_byte(cmd, DID_BAD_TARGET);
2402                cmd->scsi_done(cmd);
2403                goto out_unlock;
2404        }
2405
2406        /* if error handling is in progress, don't issue commands */
2407        if (ufshcd_eh_in_progress(hba)) {
2408                set_host_byte(cmd, DID_ERROR);
2409                cmd->scsi_done(cmd);
2410                goto out_unlock;
2411        }
2412        spin_unlock_irqrestore(hba->host->host_lock, flags);
2413
2414        hba->req_abort_count = 0;
2415
2416        /* acquire the tag to make sure device cmds don't use it */
2417        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2418                /*
2419                 * A device management command is in progress; requeue
2420                 * this command. Requeuing helps in cases where the request
2421                 * *may* find a different tag instead of waiting for the
2422                 * device management command to complete.
2423                 */
2424                err = SCSI_MLQUEUE_HOST_BUSY;
2425                goto out;
2426        }
2427
2428        err = ufshcd_hold(hba, true);
2429        if (err) {
2430                err = SCSI_MLQUEUE_HOST_BUSY;
2431                clear_bit_unlock(tag, &hba->lrb_in_use);
2432                goto out;
2433        }
2434        WARN_ON(hba->clk_gating.state != CLKS_ON);
2435
2436        lrbp = &hba->lrb[tag];
2437
2438        WARN_ON(lrbp->cmd);
2439        lrbp->cmd = cmd;
2440        lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2441        lrbp->sense_buffer = cmd->sense_buffer;
2442        lrbp->task_tag = tag;
2443        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2444        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2445        lrbp->req_abort_skip = false;
2446
2447        ufshcd_comp_scsi_upiu(hba, lrbp);
2448
2449        err = ufshcd_map_sg(hba, lrbp);
2450        if (err) {
2451                lrbp->cmd = NULL;
2452                clear_bit_unlock(tag, &hba->lrb_in_use);
2453                goto out;
2454        }
2455        /* Make sure descriptors are ready before ringing the doorbell */
2456        wmb();
2457
2458        /* issue command to the controller */
2459        spin_lock_irqsave(hba->host->host_lock, flags);
2460        ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2461        ufshcd_send_command(hba, tag);
2462out_unlock:
2463        spin_unlock_irqrestore(hba->host->host_lock, flags);
2464out:
2465        up_read(&hba->clk_scaling_lock);
2466        return err;
2467}
2468
2469static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2470                struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2471{
2472        lrbp->cmd = NULL;
2473        lrbp->sense_bufflen = 0;
2474        lrbp->sense_buffer = NULL;
2475        lrbp->task_tag = tag;
2476        lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2477        lrbp->intr_cmd = true; /* No interrupt aggregation */
2478        hba->dev_cmd.type = cmd_type;
2479
2480        return ufshcd_comp_devman_upiu(hba, lrbp);
2481}
2482
2483static int
2484ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2485{
2486        int err = 0;
2487        unsigned long flags;
2488        u32 mask = 1 << tag;
2489
2490        /* clear outstanding transaction before retry */
2491        spin_lock_irqsave(hba->host->host_lock, flags);
2492        ufshcd_utrl_clear(hba, tag);
2493        spin_unlock_irqrestore(hba->host->host_lock, flags);
2494
2495        /*
2496         * wait for h/w to clear the corresponding bit in the doorbell.
2497         * max. wait is 1 sec.
2498         */
2499        err = ufshcd_wait_for_register(hba,
2500                        REG_UTP_TRANSFER_REQ_DOOR_BELL,
2501                        mask, ~mask, 1000, 1000, true);
2502
2503        return err;
2504}
2505
2506static int
2507ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2508{
2509        struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2510
2511        /* Get the UPIU response */
2512        query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2513                                UPIU_RSP_CODE_OFFSET;
2514        return query_res->response;
2515}
2516
2517/**
2518 * ufshcd_dev_cmd_completion() - handles device management command responses
2519 * @hba: per adapter instance
2520 * @lrbp: pointer to local reference block
2521 */
2522static int
2523ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2524{
2525        int resp;
2526        int err = 0;
2527
2528        hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2529        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2530
2531        switch (resp) {
2532        case UPIU_TRANSACTION_NOP_IN:
2533                if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2534                        err = -EINVAL;
2535                        dev_err(hba->dev, "%s: unexpected response %x\n",
2536                                        __func__, resp);
2537                }
2538                break;
2539        case UPIU_TRANSACTION_QUERY_RSP:
2540                err = ufshcd_check_query_response(hba, lrbp);
2541                if (!err)
2542                        err = ufshcd_copy_query_response(hba, lrbp);
2543                break;
2544        case UPIU_TRANSACTION_REJECT_UPIU:
2545                /* TODO: handle Reject UPIU Response */
2546                err = -EPERM;
2547                dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2548                                __func__);
2549                break;
2550        default:
2551                err = -EINVAL;
2552                dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2553                                __func__, resp);
2554                break;
2555        }
2556
2557        return err;
2558}
2559
2560static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2561                struct ufshcd_lrb *lrbp, int max_timeout)
2562{
2563        int err = 0;
2564        unsigned long time_left;
2565        unsigned long flags;
2566
2567        time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2568                        msecs_to_jiffies(max_timeout));
2569
2570        /* Make sure descriptors are ready before ringing the doorbell */
2571        wmb();
2572        spin_lock_irqsave(hba->host->host_lock, flags);
2573        hba->dev_cmd.complete = NULL;
2574        if (likely(time_left)) {
2575                err = ufshcd_get_tr_ocs(lrbp);
2576                if (!err)
2577                        err = ufshcd_dev_cmd_completion(hba, lrbp);
2578        }
2579        spin_unlock_irqrestore(hba->host->host_lock, flags);
2580
2581        if (!time_left) {
2582                err = -ETIMEDOUT;
2583                dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2584                        __func__, lrbp->task_tag);
2585                if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2586                        /* successfully cleared the command, retry if needed */
2587                        err = -EAGAIN;
2588                /*
2589                 * in case of an error, after clearing the doorbell,
2590                 * we also need to clear the outstanding_request
2591                 * field in hba
2592                 */
2593                ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2594        }
2595
2596        return err;
2597}
2598
2599/**
2600 * ufshcd_get_dev_cmd_tag - Get device management command tag
2601 * @hba: per-adapter instance
2602 * @tag_out: pointer to variable with available slot value
2603 *
2604 * Get a free slot and lock it until device management command
2605 * completes.
2606 *
2607 * Returns false if a free slot is unavailable for locking, else
2608 * returns true with the tag value in @tag_out.
2609 */
2610static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2611{
2612        int tag;
2613        bool ret = false;
2614        unsigned long tmp;
2615
2616        if (!tag_out)
2617                goto out;
2618
2619        do {
2620                tmp = ~hba->lrb_in_use;
2621                tag = find_last_bit(&tmp, hba->nutrs);
2622                if (tag >= hba->nutrs)
2623                        goto out;
2624        } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2625
2626        *tag_out = tag;
2627        ret = true;
2628out:
2629        return ret;
2630}
2631
2632static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2633{
2634        clear_bit_unlock(tag, &hba->lrb_in_use);
2635}
2636
2637/**
2638 * ufshcd_exec_dev_cmd - API for sending device management requests
2639 * @hba: UFS hba
2640 * @cmd_type: specifies the type (NOP, Query...)
2641 * @timeout: timeout in milliseconds
2642 *
2643 * NOTE: Since there is only one available tag for device management commands,
2644 * it is expected you hold the hba->dev_cmd.lock mutex.
2645 */
2646static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2647                enum dev_cmd_type cmd_type, int timeout)
2648{
2649        struct ufshcd_lrb *lrbp;
2650        int err;
2651        int tag;
2652        struct completion wait;
2653        unsigned long flags;
2654
2655        down_read(&hba->clk_scaling_lock);
2656
2657        /*
2658         * Get free slot, sleep if slots are unavailable.
2659         * Even though we use wait_event() which sleeps indefinitely,
2660         * the maximum wait time is bounded by SCSI request timeout.
2661         */
2662        wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2663
2664        init_completion(&wait);
2665        lrbp = &hba->lrb[tag];
2666        WARN_ON(lrbp->cmd);
2667        err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2668        if (unlikely(err))
2669                goto out_put_tag;
2670
2671        hba->dev_cmd.complete = &wait;
2672
2673        ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2674        /* Make sure descriptors are ready before ringing the doorbell */
2675        wmb();
2676        spin_lock_irqsave(hba->host->host_lock, flags);
2677        ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2678        ufshcd_send_command(hba, tag);
2679        spin_unlock_irqrestore(hba->host->host_lock, flags);
2680
2681        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2682
2683        ufshcd_add_query_upiu_trace(hba, tag,
2684                        err ? "query_complete_err" : "query_complete");
2685
2686out_put_tag:
2687        ufshcd_put_dev_cmd_tag(hba, tag);
2688        wake_up(&hba->dev_cmd.tag_wq);
2689        up_read(&hba->clk_scaling_lock);
2690        return err;
2691}
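
/*
 * Usage sketch (mirrors how the probe path exercises this API; the wrapper
 * name is hypothetical): device management commands share one slot and one
 * lock, so callers take hba->dev_cmd.lock around ufshcd_exec_dev_cmd().
 *
 *	static int example_send_nop(struct ufs_hba *hba)
 *	{
 *		int err;
 *
 *		mutex_lock(&hba->dev_cmd.lock);
 *		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
 *					  NOP_OUT_TIMEOUT);
 *		mutex_unlock(&hba->dev_cmd.lock);
 *		return err;
 *	}
 */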
2692
2693/**
2694 * ufshcd_init_query() - init the query response and request parameters
2695 * @hba: per-adapter instance
2696 * @request: address of the request pointer to be initialized
2697 * @response: address of the response pointer to be initialized
2698 * @opcode: operation to perform
2699 * @idn: flag idn to access
2700 * @index: LU number to access
2701 * @selector: query/flag/descriptor further identification
2702 */
2703static inline void ufshcd_init_query(struct ufs_hba *hba,
2704                struct ufs_query_req **request, struct ufs_query_res **response,
2705                enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2706{
2707        *request = &hba->dev_cmd.query.request;
2708        *response = &hba->dev_cmd.query.response;
2709        memset(*request, 0, sizeof(struct ufs_query_req));
2710        memset(*response, 0, sizeof(struct ufs_query_res));
2711        (*request)->upiu_req.opcode = opcode;
2712        (*request)->upiu_req.idn = idn;
2713        (*request)->upiu_req.index = index;
2714        (*request)->upiu_req.selector = selector;
2715}
2716
2717static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2718        enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2719{
2720        int ret;
2721        int retries;
2722
2723        for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2724                ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2725                if (ret)
2726                        dev_dbg(hba->dev,
2727                                "%s: failed with error %d, retries %d\n",
2728                                __func__, ret, retries);
2729                else
2730                        break;
2731        }
2732
2733        if (ret)
2734                dev_err(hba->dev,
2735                        "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2736                        __func__, opcode, idn, ret, retries);
2737        return ret;
2738}
2739
2740/**
2741 * ufshcd_query_flag() - API function for sending flag query requests
2742 * @hba: per-adapter instance
2743 * @opcode: flag query to perform
2744 * @idn: flag idn to access
2745 * @flag_res: the flag value after the query request completes
2746 *
2747 * Returns 0 for success, non-zero in case of failure
2748 */
2749int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2750                        enum flag_idn idn, bool *flag_res)
2751{
2752        struct ufs_query_req *request = NULL;
2753        struct ufs_query_res *response = NULL;
2754        int err, index = 0, selector = 0;
2755        int timeout = QUERY_REQ_TIMEOUT;
2756
2757        BUG_ON(!hba);
2758
2759        ufshcd_hold(hba, false);
2760        mutex_lock(&hba->dev_cmd.lock);
2761        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2762                        selector);
2763
2764        switch (opcode) {
2765        case UPIU_QUERY_OPCODE_SET_FLAG:
2766        case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2767        case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2768                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2769                break;
2770        case UPIU_QUERY_OPCODE_READ_FLAG:
2771                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2772                if (!flag_res) {
2773                        /* No dummy reads */
2774                        dev_err(hba->dev, "%s: Invalid argument for read request\n",
2775                                        __func__);
2776                        err = -EINVAL;
2777                        goto out_unlock;
2778                }
2779                break;
2780        default:
2781                dev_err(hba->dev,
2782                        "%s: Expected query flag opcode but got = %d\n",
2783                        __func__, opcode);
2784                err = -EINVAL;
2785                goto out_unlock;
2786        }
2787
2788        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2789
2790        if (err) {
2791                dev_err(hba->dev,
2792                        "%s: Sending flag query for idn %d failed, err = %d\n",
2793                        __func__, idn, err);
2794                goto out_unlock;
2795        }
2796
2797        if (flag_res)
2798                *flag_res = (be32_to_cpu(response->upiu_res.value) &
2799                                MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2800
2801out_unlock:
2802        mutex_unlock(&hba->dev_cmd.lock);
2803        ufshcd_release(hba);
2804        return err;
2805}
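
/*
 * Illustrative call (the device initialization path uses this same
 * pattern; nothing new is defined here): read the fDeviceInit flag through
 * the retrying wrapper above.
 *
 *	bool flag_res = true;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *
 * On success, flag_res holds the current value of fDeviceInit.
 */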
2806
2807/**
2808 * ufshcd_query_attr - API function for sending attribute requests
2809 * @hba: per-adapter instance
2810 * @opcode: attribute opcode
2811 * @idn: attribute idn to access
2812 * @index: index field
2813 * @selector: selector field
2814 * @attr_val: the attribute value after the query request completes
2815 *
2816 * Returns 0 for success, non-zero in case of failure
2817 */
2818int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2819                      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2820{
2821        struct ufs_query_req *request = NULL;
2822        struct ufs_query_res *response = NULL;
2823        int err;
2824
2825        BUG_ON(!hba);
2826
2827        ufshcd_hold(hba, false);
2828        if (!attr_val) {
2829                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2830                                __func__, opcode);
2831                err = -EINVAL;
2832                goto out;
2833        }
2834
2835        mutex_lock(&hba->dev_cmd.lock);
2836        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2837                        selector);
2838
2839        switch (opcode) {
2840        case UPIU_QUERY_OPCODE_WRITE_ATTR:
2841                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2842                request->upiu_req.value = cpu_to_be32(*attr_val);
2843                break;
2844        case UPIU_QUERY_OPCODE_READ_ATTR:
2845                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2846                break;
2847        default:
2848                dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2849                                __func__, opcode);
2850                err = -EINVAL;
2851                goto out_unlock;
2852        }
2853
2854        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2855
2856        if (err) {
2857                dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2858                                __func__, opcode, idn, index, err);
2859                goto out_unlock;
2860        }
2861
2862        *attr_val = be32_to_cpu(response->upiu_res.value);
2863
2864out_unlock:
2865        mutex_unlock(&hba->dev_cmd.lock);
2866out:
2867        ufshcd_release(hba);
2868        return err;
2869}
2870
2871/**
2872 * ufshcd_query_attr_retry() - API function for sending query
2873 * attribute with retries
2874 * @hba: per-adapter instance
2875 * @opcode: attribute opcode
2876 * @idn: attribute idn to access
2877 * @index: index field
2878 * @selector: selector field
2879 * @attr_val: the attribute value after the query request
2880 * completes
2881 *
2882 * Returns 0 for success, non-zero in case of failure
2883 */
2884static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2885        enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2886        u32 *attr_val)
2887{
2888        int ret = 0;
2889        u32 retries;
2890
2891         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2892                ret = ufshcd_query_attr(hba, opcode, idn, index,
2893                                                selector, attr_val);
2894                if (ret)
2895                        dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2896                                __func__, ret, retries);
2897                else
2898                        break;
2899        }
2900
2901        if (ret)
2902                dev_err(hba->dev,
2903                        "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2904                        __func__, idn, ret, QUERY_REQ_RETRIES);
2905        return ret;
2906}
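
/*
 * Illustrative call (the bkops handling later in this file follows the
 * same pattern; QUERY_ATTR_IDN_BKOPS_STATUS comes from ufs.h): read a
 * device attribute through the retrying wrapper above.
 *
 *	u32 bkops_status = 0;
 *
 *	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				      QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *				      &bkops_status);
 */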
2907
2908static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2909                        enum query_opcode opcode, enum desc_idn idn, u8 index,
2910                        u8 selector, u8 *desc_buf, int *buf_len)
2911{
2912        struct ufs_query_req *request = NULL;
2913        struct ufs_query_res *response = NULL;
2914        int err;
2915
2916        BUG_ON(!hba);
2917
2918        ufshcd_hold(hba, false);
2919        if (!desc_buf) {
2920                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2921                                __func__, opcode);
2922                err = -EINVAL;
2923                goto out;
2924        }
2925
2926        if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2927                dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2928                                __func__, *buf_len);
2929                err = -EINVAL;
2930                goto out;
2931        }
2932
2933        mutex_lock(&hba->dev_cmd.lock);
2934        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2935                        selector);
2936        hba->dev_cmd.query.descriptor = desc_buf;
2937        request->upiu_req.length = cpu_to_be16(*buf_len);
2938
2939        switch (opcode) {
2940        case UPIU_QUERY_OPCODE_WRITE_DESC:
2941                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2942                break;
2943        case UPIU_QUERY_OPCODE_READ_DESC:
2944                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2945                break;
2946        default:
2947                dev_err(hba->dev,
2948                                "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2949                                __func__, opcode);
2950                err = -EINVAL;
2951                goto out_unlock;
2952        }
2953
2954        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2955
2956        if (err) {
2957                dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2958                                __func__, opcode, idn, index, err);
2959                goto out_unlock;
2960        }
2961
2962        hba->dev_cmd.query.descriptor = NULL;
2963        *buf_len = be16_to_cpu(response->upiu_res.length);
2964
2965out_unlock:
2966        mutex_unlock(&hba->dev_cmd.lock);
2967out:
2968        ufshcd_release(hba);
2969        return err;
2970}
2971
2972/**
2973 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
2974 * @hba: per-adapter instance
2975 * @opcode: attribute opcode
2976 * @idn: attribute idn to access
2977 * @index: index field
2978 * @selector: selector field
2979 * @desc_buf: the buffer that contains the descriptor
2980 * @buf_len: length parameter passed to the device
2981 *
2982 * Returns 0 for success, non-zero in case of failure.
2983 * The buf_len parameter will contain, on return, the length parameter
2984 * received on the response.
2985 */
2986int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2987                                  enum query_opcode opcode,
2988                                  enum desc_idn idn, u8 index,
2989                                  u8 selector,
2990                                  u8 *desc_buf, int *buf_len)
2991{
2992        int err;
2993        int retries;
2994
2995        for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2996                err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2997                                                selector, desc_buf, buf_len);
2998                if (!err || err == -EINVAL)
2999                        break;
3000        }
3001
3002        return err;
3003}
3004
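/*
 * Illustrative sketch (not part of the driver): reading just the header of
 * the device descriptor with ufshcd_query_descriptor_retry(). buf_len is
 * passed by reference and is updated with the length returned by the device.
 *
 *	u8 header[QUERY_DESC_HDR_SIZE];
 *	int buff_len = QUERY_DESC_HDR_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    header, &buff_len);
 *	if (!err)
 *		dev_dbg(hba->dev, "device descriptor bLength = %u\n",
 *			header[QUERY_DESC_LENGTH_OFFSET]);
 */
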
3005/**
3006 * ufshcd_read_desc_length - read the specified descriptor length from header
3007 * @hba: Pointer to adapter instance
3008 * @desc_id: descriptor idn value
3009 * @desc_index: descriptor index
3010 * @desc_length: pointer to variable to read the length of descriptor
3011 *
3012 * Return 0 in case of success, non-zero otherwise
3013 */
3014static int ufshcd_read_desc_length(struct ufs_hba *hba,
3015        enum desc_idn desc_id,
3016        int desc_index,
3017        int *desc_length)
3018{
3019        int ret;
3020        u8 header[QUERY_DESC_HDR_SIZE];
3021        int header_len = QUERY_DESC_HDR_SIZE;
3022
3023        if (desc_id >= QUERY_DESC_IDN_MAX)
3024                return -EINVAL;
3025
3026        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3027                                        desc_id, desc_index, 0, header,
3028                                        &header_len);
3029
3030        if (ret) {
3031                dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3032                        __func__, desc_id);
3033                return ret;
3034        } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3035                dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3036                        __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3037                        desc_id);
3038                ret = -EINVAL;
3039        }
3040
3041        *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3042        return ret;
3043
3044}
3045
3046/**
3047 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3048 * @hba: Pointer to adapter instance
3049 * @desc_id: descriptor idn value
3050 * @desc_len: mapped desc length (out)
3051 *
3052 * Return 0 in case of success, non-zero otherwise
3053 */
3054int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3055        enum desc_idn desc_id, int *desc_len)
3056{
3057        switch (desc_id) {
3058        case QUERY_DESC_IDN_DEVICE:
3059                *desc_len = hba->desc_size.dev_desc;
3060                break;
3061        case QUERY_DESC_IDN_POWER:
3062                *desc_len = hba->desc_size.pwr_desc;
3063                break;
3064        case QUERY_DESC_IDN_GEOMETRY:
3065                *desc_len = hba->desc_size.geom_desc;
3066                break;
3067        case QUERY_DESC_IDN_CONFIGURATION:
3068                *desc_len = hba->desc_size.conf_desc;
3069                break;
3070        case QUERY_DESC_IDN_UNIT:
3071                *desc_len = hba->desc_size.unit_desc;
3072                break;
3073        case QUERY_DESC_IDN_INTERCONNECT:
3074                *desc_len = hba->desc_size.interc_desc;
3075                break;
3076        case QUERY_DESC_IDN_STRING:
3077                *desc_len = QUERY_DESC_MAX_SIZE;
3078                break;
3079        case QUERY_DESC_IDN_HEALTH:
3080                *desc_len = hba->desc_size.hlth_desc;
3081                break;
3082        case QUERY_DESC_IDN_RFU_0:
3083        case QUERY_DESC_IDN_RFU_1:
3084                *desc_len = 0;
3085                break;
3086        default:
3087                *desc_len = 0;
3088                return -EINVAL;
3089        }
3090        return 0;
3091}
3092EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3093
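/*
 * Illustrative sketch (not part of the driver): using
 * ufshcd_map_desc_id_to_length() to size a buffer before requesting the
 * full descriptor, mirroring what ufshcd_read_desc_param() does below.
 * The local buffer handling here is hypothetical.
 *
 *	int buff_len = 0;
 *	int err;
 *	u8 *desc_buf;
 *
 *	if (ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_POWER, &buff_len) ||
 *	    !buff_len)
 *		return -EINVAL;
 *	desc_buf = kzalloc(buff_len, GFP_KERNEL);
 *	if (!desc_buf)
 *		return -ENOMEM;
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_POWER, 0, 0,
 *					    desc_buf, &buff_len);
 *	kfree(desc_buf);
 */
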
3094/**
3095 * ufshcd_read_desc_param - read the specified descriptor parameter
3096 * @hba: Pointer to adapter instance
3097 * @desc_id: descriptor idn value
3098 * @desc_index: descriptor index
3099 * @param_offset: offset of the parameter to read
3100 * @param_read_buf: pointer to buffer where parameter would be read
3101 * @param_size: sizeof(param_read_buf)
3102 *
3103 * Return 0 in case of success, non-zero otherwise
3104 */
3105int ufshcd_read_desc_param(struct ufs_hba *hba,
3106                           enum desc_idn desc_id,
3107                           int desc_index,
3108                           u8 param_offset,
3109                           u8 *param_read_buf,
3110                           u8 param_size)
3111{
3112        int ret;
3113        u8 *desc_buf;
3114        int buff_len;
3115        bool is_kmalloc = true;
3116
3117        /* Safety check */
3118        if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3119                return -EINVAL;
3120
3121        /* Get the max length of descriptor from structure filled up at probe
3122         * time.
3123         */
3124        ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3125
3126        /* Sanity checks */
3127        if (ret || !buff_len) {
3128                dev_err(hba->dev, "%s: Failed to get full descriptor length",
3129                        __func__);
3130                return ret;
3131        }
3132
3133        /* Check whether we need temp memory */
3134        if (param_offset != 0 || param_size < buff_len) {
3135                desc_buf = kmalloc(buff_len, GFP_KERNEL);
3136                if (!desc_buf)
3137                        return -ENOMEM;
3138        } else {
3139                desc_buf = param_read_buf;
3140                is_kmalloc = false;
3141        }
3142
3143        /* Request for full descriptor */
3144        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3145                                        desc_id, desc_index, 0,
3146                                        desc_buf, &buff_len);
3147
3148        if (ret) {
3149                dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3150                        __func__, desc_id, desc_index, param_offset, ret);
3151                goto out;
3152        }
3153
3154        /* Sanity check */
3155        if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3156                dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3157                        __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3158                ret = -EINVAL;
3159                goto out;
3160        }
3161
3162        /* Make sure we do not copy more data than is available */
3163        if (is_kmalloc && param_size > buff_len)
3164                param_size = buff_len;
3165
3166        if (is_kmalloc)
3167                memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3168out:
3169        if (is_kmalloc)
3170                kfree(desc_buf);
3171        return ret;
3172}
3173
3174static inline int ufshcd_read_desc(struct ufs_hba *hba,
3175                                   enum desc_idn desc_id,
3176                                   int desc_index,
3177                                   u8 *buf,
3178                                   u32 size)
3179{
3180        return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3181}
3182
3183static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3184                                         u8 *buf,
3185                                         u32 size)
3186{
3187        return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3188}
3189
3190static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3191{
3192        return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3193}
3194
3195/**
3196 * ufshcd_read_string_desc - read string descriptor
3197 * @hba: pointer to adapter instance
3198 * @desc_index: descriptor index
3199 * @buf: pointer to buffer where descriptor would be read
3200 * @size: size of buf
3201 * @ascii: if true convert from unicode to ascii characters
3202 *
3203 * Return 0 in case of success, non-zero otherwise
3204 */
3205int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3206                            u8 *buf, u32 size, bool ascii)
3207{
3208        int err = 0;
3209
3210        err = ufshcd_read_desc(hba,
3211                                QUERY_DESC_IDN_STRING, desc_index, buf, size);
3212
3213        if (err) {
3214                dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3215                        __func__, QUERY_REQ_RETRIES, err);
3216                goto out;
3217        }
3218
3219        if (ascii) {
3220                int desc_len;
3221                int ascii_len;
3222                int i;
3223                char *buff_ascii;
3224
3225                desc_len = buf[0];
3226                /* remove header and divide by 2 to move from UTF16 to UTF8 */
3227                ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3228                if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3229                        dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3230                                        __func__);
3231                        err = -ENOMEM;
3232                        goto out;
3233                }
3234
3235                buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3236                if (!buff_ascii) {
3237                        err = -ENOMEM;
3238                        goto out;
3239                }
3240
3241                /*
3242                 * The descriptor contains a string in UTF-16 format;
3243                 * convert it to UTF-8 so it can be displayed.
3244                 */
3245                utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3246                                desc_len - QUERY_DESC_HDR_SIZE,
3247                                UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3248
3249                /* replace non-printable or non-ASCII characters with spaces */
3250                for (i = 0; i < ascii_len; i++)
3251                        ufshcd_remove_non_printable(&buff_ascii[i]);
3252
3253                memset(buf + QUERY_DESC_HDR_SIZE, 0,
3254                                size - QUERY_DESC_HDR_SIZE);
3255                memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3256                buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3257                kfree(buff_ascii);
3258        }
3259out:
3260        return err;
3261}
3262
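/*
 * Illustrative sketch (not part of the driver): reading a string descriptor
 * and converting it to ASCII. The index would normally come from one of the
 * string-index fields of the device descriptor; product_name_index below is
 * a hypothetical local variable holding such an index.
 *
 *	u8 str_desc[QUERY_DESC_MAX_SIZE + 1] = {0};
 *	int err;
 *
 *	err = ufshcd_read_string_desc(hba, product_name_index, str_desc,
 *				      QUERY_DESC_MAX_SIZE, true);
 *	if (!err)
 *		dev_info(hba->dev, "product name: %s\n",
 *			 str_desc + QUERY_DESC_HDR_SIZE);
 */
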
3263/**
3264 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3265 * @hba: Pointer to adapter instance
3266 * @lun: lun id
3267 * @param_offset: offset of the parameter to read
3268 * @param_read_buf: pointer to buffer where parameter would be read
3269 * @param_size: sizeof(param_read_buf)
3270 *
3271 * Return 0 in case of success, non-zero otherwise
3272 */
3273static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3274                                              int lun,
3275                                              enum unit_desc_param param_offset,
3276                                              u8 *param_read_buf,
3277                                              u32 param_size)
3278{
3279        /*
3280         * Unit descriptors are only available for general purpose LUs (LUN id
3281         * from 0 to 7) and RPMB Well known LU.
3282         */
3283        if (!ufs_is_valid_unit_desc_lun(lun))
3284                return -EOPNOTSUPP;
3285
3286        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3287                                      param_offset, param_read_buf, param_size);
3288}
3289
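/*
 * Illustrative sketch (not part of the driver): reading a single unit
 * descriptor parameter, here the LU queue depth, in the same way
 * ufshcd_set_queue_depth() does further below. lun is assumed to already be
 * in UPIU form (see ufshcd_scsi_to_upiu_lun()).
 *
 *	u8 lun_qdepth = 0;
 *	int err;
 *
 *	err = ufshcd_read_unit_desc_param(hba, lun, UNIT_DESC_PARAM_LU_Q_DEPTH,
 *					  &lun_qdepth, sizeof(lun_qdepth));
 *
 * A return value of -EOPNOTSUPP means the LUN has no unit descriptor; only
 * general purpose LUs and the RPMB well known LU provide one.
 */
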
3290/**
3291 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3292 * @hba: per adapter instance
3293 *
3294 * 1. Allocate DMA memory for Command Descriptor array
3295 *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3296 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3297 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3298 *      (UTMRDL)
3299 * 4. Allocate memory for local reference block(lrb).
3300 *
3301 * Returns 0 for success, non-zero in case of failure
3302 */
3303static int ufshcd_memory_alloc(struct ufs_hba *hba)
3304{
3305        size_t utmrdl_size, utrdl_size, ucdl_size;
3306
3307        /* Allocate memory for UTP command descriptors */
3308        ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3309        hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3310                                                  ucdl_size,
3311                                                  &hba->ucdl_dma_addr,
3312                                                  GFP_KERNEL);
3313
3314        /*
3315         * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3316         * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is
3317         * aligned to PAGE_SIZE, then it is guaranteed to be aligned to
3318         * 128 bytes as well.
3319         */
3320        if (!hba->ucdl_base_addr ||
3321            WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3322                dev_err(hba->dev,
3323                        "Command Descriptor Memory allocation failed\n");
3324                goto out;
3325        }
3326
3327        /*
3328         * Allocate memory for UTP Transfer descriptors
3329         * UFSHCI requires 1024 byte alignment of UTRD
3330         */
3331        utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3332        hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3333                                                   utrdl_size,
3334                                                   &hba->utrdl_dma_addr,
3335                                                   GFP_KERNEL);
3336        if (!hba->utrdl_base_addr ||
3337            WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3338                dev_err(hba->dev,
3339                        "Transfer Descriptor Memory allocation failed\n");
3340                goto out;
3341        }
3342
3343        /*
3344         * Allocate memory for UTP Task Management descriptors
3345         * UFSHCI requires 1024 byte alignment of UTMRD
3346         */
3347        utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3348        hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3349                                                    utmrdl_size,
3350                                                    &hba->utmrdl_dma_addr,
3351                                                    GFP_KERNEL);
3352        if (!hba->utmrdl_base_addr ||
3353            WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3354                dev_err(hba->dev,
3355                "Task Management Descriptor Memory allocation failed\n");
3356                goto out;
3357        }
3358
3359        /* Allocate memory for local reference block */
3360        hba->lrb = devm_kcalloc(hba->dev,
3361                                hba->nutrs, sizeof(struct ufshcd_lrb),
3362                                GFP_KERNEL);
3363        if (!hba->lrb) {
3364                dev_err(hba->dev, "LRB Memory allocation failed\n");
3365                goto out;
3366        }
3367        return 0;
3368out:
3369        return -ENOMEM;
3370}
3371
3372/**
3373 * ufshcd_host_memory_configure - configure local reference block with
3374 *                              memory offsets
3375 * @hba: per adapter instance
3376 *
3377 * Configure Host memory space
3378 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3379 * address.
3380 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3381 * and PRDT offset.
3382 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3383 * into local reference block.
3384 */
3385static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3386{
3387        struct utp_transfer_cmd_desc *cmd_descp;
3388        struct utp_transfer_req_desc *utrdlp;
3389        dma_addr_t cmd_desc_dma_addr;
3390        dma_addr_t cmd_desc_element_addr;
3391        u16 response_offset;
3392        u16 prdt_offset;
3393        int cmd_desc_size;
3394        int i;
3395
3396        utrdlp = hba->utrdl_base_addr;
3397        cmd_descp = hba->ucdl_base_addr;
3398
3399        response_offset =
3400                offsetof(struct utp_transfer_cmd_desc, response_upiu);
3401        prdt_offset =
3402                offsetof(struct utp_transfer_cmd_desc, prd_table);
3403
3404        cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3405        cmd_desc_dma_addr = hba->ucdl_dma_addr;
3406
3407        for (i = 0; i < hba->nutrs; i++) {
3408                /* Configure UTRD with command descriptor base address */
3409                cmd_desc_element_addr =
3410                                (cmd_desc_dma_addr + (cmd_desc_size * i));
3411                utrdlp[i].command_desc_base_addr_lo =
3412                                cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3413                utrdlp[i].command_desc_base_addr_hi =
3414                                cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3415
3416                /* Response upiu and prdt offset should be in double words */
3417                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3418                        utrdlp[i].response_upiu_offset =
3419                                cpu_to_le16(response_offset);
3420                        utrdlp[i].prd_table_offset =
3421                                cpu_to_le16(prdt_offset);
3422                        utrdlp[i].response_upiu_length =
3423                                cpu_to_le16(ALIGNED_UPIU_SIZE);
3424                } else {
3425                        utrdlp[i].response_upiu_offset =
3426                                cpu_to_le16((response_offset >> 2));
3427                        utrdlp[i].prd_table_offset =
3428                                cpu_to_le16((prdt_offset >> 2));
3429                        utrdlp[i].response_upiu_length =
3430                                cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3431                }
3432
3433                hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3434                hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3435                                (i * sizeof(struct utp_transfer_req_desc));
3436                hba->lrb[i].ucd_req_ptr =
3437                        (struct utp_upiu_req *)(cmd_descp + i);
3438                hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3439                hba->lrb[i].ucd_rsp_ptr =
3440                        (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3441                hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3442                                response_offset;
3443                hba->lrb[i].ucd_prdt_ptr =
3444                        (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3445                hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3446                                prdt_offset;
3447        }
3448}
3449
3450/**
3451 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3452 * @hba: per adapter instance
3453 *
3454 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3455 * in order to initialize the Unipro link startup procedure.
3456 * Once the Unipro links are up, the device connected to the controller
3457 * is detected.
3458 *
3459 * Returns 0 on success, non-zero value on failure
3460 */
3461static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3462{
3463        struct uic_command uic_cmd = {0};
3464        int ret;
3465
3466        uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3467
3468        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3469        if (ret)
3470                dev_dbg(hba->dev,
3471                        "dme-link-startup: error code %d\n", ret);
3472        return ret;
3473}
3474/**
3475 * ufshcd_dme_reset - UIC command for DME_RESET
3476 * @hba: per adapter instance
3477 *
3478 * DME_RESET command is issued in order to reset UniPro stack.
3479 * This function currently deals only with cold reset.
3480 *
3481 * Returns 0 on success, non-zero value on failure
3482 */
3483static int ufshcd_dme_reset(struct ufs_hba *hba)
3484{
3485        struct uic_command uic_cmd = {0};
3486        int ret;
3487
3488        uic_cmd.command = UIC_CMD_DME_RESET;
3489
3490        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3491        if (ret)
3492                dev_err(hba->dev,
3493                        "dme-reset: error code %d\n", ret);
3494
3495        return ret;
3496}
3497
3498/**
3499 * ufshcd_dme_enable - UIC command for DME_ENABLE
3500 * @hba: per adapter instance
3501 *
3502 * DME_ENABLE command is issued in order to enable UniPro stack.
3503 *
3504 * Returns 0 on success, non-zero value on failure
3505 */
3506static int ufshcd_dme_enable(struct ufs_hba *hba)
3507{
3508        struct uic_command uic_cmd = {0};
3509        int ret;
3510
3511        uic_cmd.command = UIC_CMD_DME_ENABLE;
3512
3513        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3514        if (ret)
3515                dev_err(hba->dev,
3516                        "dme-enable: error code %d\n", ret);
3517
3518        return ret;
3519}
3520
3521static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3522{
3523        #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3524        unsigned long min_sleep_time_us;
3525
3526        if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3527                return;
3528
3529        /*
3530         * last_dme_cmd_tstamp will be 0 only for 1st call to
3531         * this function
3532         */
3533        if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3534                min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3535        } else {
3536                unsigned long delta =
3537                        (unsigned long) ktime_to_us(
3538                                ktime_sub(ktime_get(),
3539                                hba->last_dme_cmd_tstamp));
3540
3541                if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3542                        min_sleep_time_us =
3543                                MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3544                else
3545                        return; /* no more delay required */
3546        }
3547
3548        /* allow sleep for extra 50us if needed */
3549        usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3550}
3551
3552/**
3553 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3554 * @hba: per adapter instance
3555 * @attr_sel: uic command argument1
3556 * @attr_set: attribute set type as uic command argument2
3557 * @mib_val: setting value as uic command argument3
3558 * @peer: indicate whether peer or local
3559 *
3560 * Returns 0 on success, non-zero value on failure
3561 */
3562int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3563                        u8 attr_set, u32 mib_val, u8 peer)
3564{
3565        struct uic_command uic_cmd = {0};
3566        static const char *const action[] = {
3567                "dme-set",
3568                "dme-peer-set"
3569        };
3570        const char *set = action[!!peer];
3571        int ret;
3572        int retries = UFS_UIC_COMMAND_RETRIES;
3573
3574        uic_cmd.command = peer ?
3575                UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3576        uic_cmd.argument1 = attr_sel;
3577        uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3578        uic_cmd.argument3 = mib_val;
3579
3580        do {
3581                /* for peer attributes we retry upon failure */
3582                ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3583                if (ret)
3584                        dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3585                                set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3586        } while (ret && peer && --retries);
3587
3588        if (ret)
3589                dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3590                        set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3591                        UFS_UIC_COMMAND_RETRIES - retries);
3592
3593        return ret;
3594}
3595EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3596
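/*
 * Illustrative sketch (not part of the driver): setting a local and a peer
 * attribute through the ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers
 * built on top of this function (see their use in ufshcd_change_power_mode()
 * and ufshcd_disable_tx_lcc() below).
 *
 *	int err;
 *
 *	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
 *	if (!err)
 *		err = ufshcd_dme_peer_set(hba,
 *			UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
 *					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 0);
 */
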
3597/**
3598 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3599 * @hba: per adapter instance
3600 * @attr_sel: uic command argument1
3601 * @mib_val: the value of the attribute as returned by the UIC command
3602 * @peer: indicate whether peer or local
3603 *
3604 * Returns 0 on success, non-zero value on failure
3605 */
3606int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3607                        u32 *mib_val, u8 peer)
3608{
3609        struct uic_command uic_cmd = {0};
3610        static const char *const action[] = {
3611                "dme-get",
3612                "dme-peer-get"
3613        };
3614        const char *get = action[!!peer];
3615        int ret;
3616        int retries = UFS_UIC_COMMAND_RETRIES;
3617        struct ufs_pa_layer_attr orig_pwr_info;
3618        struct ufs_pa_layer_attr temp_pwr_info;
3619        bool pwr_mode_change = false;
3620
3621        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3622                orig_pwr_info = hba->pwr_info;
3623                temp_pwr_info = orig_pwr_info;
3624
3625                if (orig_pwr_info.pwr_tx == FAST_MODE ||
3626                    orig_pwr_info.pwr_rx == FAST_MODE) {
3627                        temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3628                        temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3629                        pwr_mode_change = true;
3630                } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3631                    orig_pwr_info.pwr_rx == SLOW_MODE) {
3632                        temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3633                        temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3634                        pwr_mode_change = true;
3635                }
3636                if (pwr_mode_change) {
3637                        ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3638                        if (ret)
3639                                goto out;
3640                }
3641        }
3642
3643        uic_cmd.command = peer ?
3644                UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3645        uic_cmd.argument1 = attr_sel;
3646
3647        do {
3648                /* for peer attributes we retry upon failure */
3649                ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3650                if (ret)
3651                        dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3652                                get, UIC_GET_ATTR_ID(attr_sel), ret);
3653        } while (ret && peer && --retries);
3654
3655        if (ret)
3656                dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3657                        get, UIC_GET_ATTR_ID(attr_sel),
3658                        UFS_UIC_COMMAND_RETRIES - retries);
3659
3660        if (mib_val && !ret)
3661                *mib_val = uic_cmd.argument3;
3662
3663        if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3664            && pwr_mode_change)
3665                ufshcd_change_power_mode(hba, &orig_pwr_info);
3666out:
3667        return ret;
3668}
3669EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3670
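/*
 * Illustrative sketch (not part of the driver): reading the same attribute
 * from the local and the peer (device) side via the ufshcd_dme_get() and
 * ufshcd_dme_peer_get() wrappers, as ufshcd_get_max_pwr_mode() does below.
 *
 *	u32 local_gear = 0, peer_gear = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &local_gear);
 *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &peer_gear);
 */
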
3671/**
3672 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3673 * state) and waits for it to take effect.
3674 *
3675 * @hba: per adapter instance
3676 * @cmd: UIC command to execute
3677 *
3678 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3679 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3680 * and device UniPro link, and hence their final completion is indicated by
3681 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3682 * addition to normal UIC command completion Status (UCCS). This function only
3683 * returns after the relevant status bits indicate the completion.
3684 *
3685 * Returns 0 on success, non-zero value on failure
3686 */
3687static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3688{
3689        struct completion uic_async_done;
3690        unsigned long flags;
3691        u8 status;
3692        int ret;
3693        bool reenable_intr = false;
3694
3695        mutex_lock(&hba->uic_cmd_mutex);
3696        init_completion(&uic_async_done);
3697        ufshcd_add_delay_before_dme_cmd(hba);
3698
3699        spin_lock_irqsave(hba->host->host_lock, flags);
3700        hba->uic_async_done = &uic_async_done;
3701        if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3702                ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3703                /*
3704                 * Make sure UIC command completion interrupt is disabled before
3705                 * issuing UIC command.
3706                 */
3707                wmb();
3708                reenable_intr = true;
3709        }
3710        ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3711        spin_unlock_irqrestore(hba->host->host_lock, flags);
3712        if (ret) {
3713                dev_err(hba->dev,
3714                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3715                        cmd->command, cmd->argument3, ret);
3716                goto out;
3717        }
3718
3719        if (!wait_for_completion_timeout(hba->uic_async_done,
3720                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3721                dev_err(hba->dev,
3722                        "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3723                        cmd->command, cmd->argument3);
3724                ret = -ETIMEDOUT;
3725                goto out;
3726        }
3727
3728        status = ufshcd_get_upmcrs(hba);
3729        if (status != PWR_LOCAL) {
3730                dev_err(hba->dev,
3731                        "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3732                        cmd->command, status);
3733                ret = (status != PWR_OK) ? status : -1;
3734        }
3735out:
3736        if (ret) {
3737                ufshcd_print_host_state(hba);
3738                ufshcd_print_pwr_info(hba);
3739                ufshcd_print_host_regs(hba);
3740        }
3741
3742        spin_lock_irqsave(hba->host->host_lock, flags);
3743        hba->active_uic_cmd = NULL;
3744        hba->uic_async_done = NULL;
3745        if (reenable_intr)
3746                ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3747        spin_unlock_irqrestore(hba->host->host_lock, flags);
3748        mutex_unlock(&hba->uic_cmd_mutex);
3749
3750        return ret;
3751}
3752
3753/**
3754 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3755 *                              using DME_SET primitives.
3756 * @hba: per adapter instance
3757 * @mode: power mode value
3758 *
3759 * Returns 0 on success, non-zero value on failure
3760 */
3761static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3762{
3763        struct uic_command uic_cmd = {0};
3764        int ret;
3765
3766        if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3767                ret = ufshcd_dme_set(hba,
3768                                UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3769                if (ret) {
3770                        dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3771                                                __func__, ret);
3772                        goto out;
3773                }
3774        }
3775
3776        uic_cmd.command = UIC_CMD_DME_SET;
3777        uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3778        uic_cmd.argument3 = mode;
3779        ufshcd_hold(hba, false);
3780        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3781        ufshcd_release(hba);
3782
3783out:
3784        return ret;
3785}
3786
3787static int ufshcd_link_recovery(struct ufs_hba *hba)
3788{
3789        int ret;
3790        unsigned long flags;
3791
3792        spin_lock_irqsave(hba->host->host_lock, flags);
3793        hba->ufshcd_state = UFSHCD_STATE_RESET;
3794        ufshcd_set_eh_in_progress(hba);
3795        spin_unlock_irqrestore(hba->host->host_lock, flags);
3796
3797        ret = ufshcd_host_reset_and_restore(hba);
3798
3799        spin_lock_irqsave(hba->host->host_lock, flags);
3800        if (ret)
3801                hba->ufshcd_state = UFSHCD_STATE_ERROR;
3802        ufshcd_clear_eh_in_progress(hba);
3803        spin_unlock_irqrestore(hba->host->host_lock, flags);
3804
3805        if (ret)
3806                dev_err(hba->dev, "%s: link recovery failed, err %d",
3807                        __func__, ret);
3808
3809        return ret;
3810}
3811
3812static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3813{
3814        int ret;
3815        struct uic_command uic_cmd = {0};
3816        ktime_t start = ktime_get();
3817
3818        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3819
3820        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3821        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3822        trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3823                             ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3824
3825        if (ret) {
3826                dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3827                        __func__, ret);
3828
3829                /*
3830                 * If link recovery fails then return an error so that the
3831                 * caller doesn't retry hibern8 enter again.
3832                 */
3833                if (ufshcd_link_recovery(hba))
3834                        ret = -ENOLINK;
3835        } else
3836                ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3837                                                                POST_CHANGE);
3838
3839        return ret;
3840}
3841
3842static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3843{
3844        int ret = 0, retries;
3845
3846        for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3847                ret = __ufshcd_uic_hibern8_enter(hba);
3848                if (!ret || ret == -ENOLINK)
3849                        goto out;
3850        }
3851out:
3852        return ret;
3853}
3854
3855static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3856{
3857        struct uic_command uic_cmd = {0};
3858        int ret;
3859        ktime_t start = ktime_get();
3860
3861        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3862
3863        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3864        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3865        trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3866                             ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3867
3868        if (ret) {
3869                dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3870                        __func__, ret);
3871                ret = ufshcd_link_recovery(hba);
3872        } else {
3873                ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3874                                                                POST_CHANGE);
3875                hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3876                hba->ufs_stats.hibern8_exit_cnt++;
3877        }
3878
3879        return ret;
3880}
3881
3882static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3883{
3884        unsigned long flags;
3885
3886        if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
3887                return;
3888
3889        spin_lock_irqsave(hba->host->host_lock, flags);
3890        ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3891        spin_unlock_irqrestore(hba->host->host_lock, flags);
3892}
3893
3894/**
3895 * ufshcd_init_pwr_info - setting the POR (power on reset)
3896 * values in hba power info
3897 * @hba: per-adapter instance
3898 */
3899static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3900{
3901        hba->pwr_info.gear_rx = UFS_PWM_G1;
3902        hba->pwr_info.gear_tx = UFS_PWM_G1;
3903        hba->pwr_info.lane_rx = 1;
3904        hba->pwr_info.lane_tx = 1;
3905        hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3906        hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3907        hba->pwr_info.hs_rate = 0;
3908}
3909
3910/**
3911 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3912 * @hba: per-adapter instance
3913 */
3914static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3915{
3916        struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3917
3918        if (hba->max_pwr_info.is_valid)
3919                return 0;
3920
3921        pwr_info->pwr_tx = FAST_MODE;
3922        pwr_info->pwr_rx = FAST_MODE;
3923        pwr_info->hs_rate = PA_HS_MODE_B;
3924
3925        /* Get the connected lane count */
3926        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3927                        &pwr_info->lane_rx);
3928        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3929                        &pwr_info->lane_tx);
3930
3931        if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3932                dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3933                                __func__,
3934                                pwr_info->lane_rx,
3935                                pwr_info->lane_tx);
3936                return -EINVAL;
3937        }
3938
3939        /*
3940         * First, get the maximum gears of HS speed.
3941         * If it is zero, there is no HS gear capability.
3942         * Then, get the maximum gears of PWM speed.
3943         */
3944        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3945        if (!pwr_info->gear_rx) {
3946                ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3947                                &pwr_info->gear_rx);
3948                if (!pwr_info->gear_rx) {
3949                        dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3950                                __func__, pwr_info->gear_rx);
3951                        return -EINVAL;
3952                }
3953                pwr_info->pwr_rx = SLOW_MODE;
3954        }
3955
3956        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3957                        &pwr_info->gear_tx);
3958        if (!pwr_info->gear_tx) {
3959                ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3960                                &pwr_info->gear_tx);
3961                if (!pwr_info->gear_tx) {
3962                        dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3963                                __func__, pwr_info->gear_tx);
3964                        return -EINVAL;
3965                }
3966                pwr_info->pwr_tx = SLOW_MODE;
3967        }
3968
3969        hba->max_pwr_info.is_valid = true;
3970        return 0;
3971}
3972
3973static int ufshcd_change_power_mode(struct ufs_hba *hba,
3974                             struct ufs_pa_layer_attr *pwr_mode)
3975{
3976        int ret;
3977
3978        /* if already configured to the requested pwr_mode */
3979        if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3980            pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3981            pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3982            pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3983            pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3984            pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3985            pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3986                dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3987                return 0;
3988        }
3989
3990        /*
3991         * Configure attributes for power mode change with below.
3992         * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
3993         * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
3994         * - PA_HSSERIES
3995         */
3996        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
3997        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
3998                        pwr_mode->lane_rx);
3999        if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4000                        pwr_mode->pwr_rx == FAST_MODE)
4001                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4002        else
4003                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4004
4005        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4006        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4007                        pwr_mode->lane_tx);
4008        if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4009                        pwr_mode->pwr_tx == FAST_MODE)
4010                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4011        else
4012                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4013
4014        if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4015            pwr_mode->pwr_tx == FASTAUTO_MODE ||
4016            pwr_mode->pwr_rx == FAST_MODE ||
4017            pwr_mode->pwr_tx == FAST_MODE)
4018                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4019                                                pwr_mode->hs_rate);
4020
4021        ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4022                        | pwr_mode->pwr_tx);
4023
4024        if (ret) {
4025                dev_err(hba->dev,
4026                        "%s: power mode change failed %d\n", __func__, ret);
4027        } else {
4028                ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4029                                                                pwr_mode);
4030
4031                memcpy(&hba->pwr_info, pwr_mode,
4032                        sizeof(struct ufs_pa_layer_attr));
4033        }
4034
4035        return ret;
4036}
4037
4038/**
4039 * ufshcd_config_pwr_mode - configure a new power mode
4040 * @hba: per-adapter instance
4041 * @desired_pwr_mode: desired power configuration
4042 */
4043int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4044                struct ufs_pa_layer_attr *desired_pwr_mode)
4045{
4046        struct ufs_pa_layer_attr final_params = { 0 };
4047        int ret;
4048
4049        ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4050                                        desired_pwr_mode, &final_params);
4051
4052        if (ret)
4053                memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4054
4055        ret = ufshcd_change_power_mode(hba, &final_params);
4056        if (!ret)
4057                ufshcd_print_pwr_info(hba);
4058
4059        return ret;
4060}
4061EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4062
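/*
 * Illustrative sketch (not part of the driver): requesting a specific power
 * mode from vendor code through ufshcd_config_pwr_mode(). The gear/lane/mode
 * values below are arbitrary examples; real callers usually start from
 * hba->max_pwr_info.info as filled in by ufshcd_get_max_pwr_mode().
 *
 *	struct ufs_pa_layer_attr new_pwr = {
 *		.gear_rx = UFS_PWM_G1,
 *		.gear_tx = UFS_PWM_G1,
 *		.lane_rx = 1,
 *		.lane_tx = 1,
 *		.pwr_rx = SLOW_MODE,
 *		.pwr_tx = SLOW_MODE,
 *		.hs_rate = 0,
 *	};
 *
 *	if (ufshcd_config_pwr_mode(hba, &new_pwr))
 *		dev_err(hba->dev, "power mode change request failed\n");
 */
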
4063/**
4064 * ufshcd_complete_dev_init() - checks device readiness
4065 * @hba: per-adapter instance
4066 *
4067 * Set fDeviceInit flag and poll until device toggles it.
4068 */
4069static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4070{
4071        int i;
4072        int err;
4073        bool flag_res = 1;
4074
4075        err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4076                QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4077        if (err) {
4078                dev_err(hba->dev,
4079                        "%s setting fDeviceInit flag failed with error %d\n",
4080                        __func__, err);
4081                goto out;
4082        }
4083
4084        /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4085        for (i = 0; i < 1000 && !err && flag_res; i++)
4086                err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4087                        QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4088
4089        if (err)
4090                dev_err(hba->dev,
4091                        "%s reading fDeviceInit flag failed with error %d\n",
4092                        __func__, err);
4093        else if (flag_res)
4094                dev_err(hba->dev,
4095                        "%s fDeviceInit was not cleared by the device\n",
4096                        __func__);
4097
4098out:
4099        return err;
4100}
4101
4102/**
4103 * ufshcd_make_hba_operational - Make UFS controller operational
4104 * @hba: per adapter instance
4105 *
4106 * To bring UFS host controller to operational state,
4107 * 1. Enable required interrupts
4108 * 2. Configure interrupt aggregation
4109 * 3. Program UTRL and UTMRL base address
4110 * 4. Configure run-stop-registers
4111 *
4112 * Returns 0 on success, non-zero value on failure
4113 */
4114static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4115{
4116        int err = 0;
4117        u32 reg;
4118
4119        /* Enable required interrupts */
4120        ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4121
4122        /* Configure interrupt aggregation */
4123        if (ufshcd_is_intr_aggr_allowed(hba))
4124                ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4125        else
4126                ufshcd_disable_intr_aggr(hba);
4127
4128        /* Configure UTRL and UTMRL base address registers */
4129        ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4130                        REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4131        ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4132                        REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4133        ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4134                        REG_UTP_TASK_REQ_LIST_BASE_L);
4135        ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4136                        REG_UTP_TASK_REQ_LIST_BASE_H);
4137
4138        /*
4139         * Make sure base address and interrupt setup are updated before
4140         * enabling the run/stop registers below.
4141         */
4142        wmb();
4143
4144        /*
4145         * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4146         */
4147        reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4148        if (!(ufshcd_get_lists_status(reg))) {
4149                ufshcd_enable_run_stop_reg(hba);
4150        } else {
4151                dev_err(hba->dev,
4152                        "Host controller not ready to process requests");
4153                err = -EIO;
4154                goto out;
4155        }
4156
4157out:
4158        return err;
4159}
4160
4161/**
4162 * ufshcd_hba_stop - Send controller to reset state
4163 * @hba: per adapter instance
4164 * @can_sleep: perform sleep or just spin
4165 */
4166static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4167{
4168        int err;
4169
4170        ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4171        err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4172                                        CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4173                                        10, 1, can_sleep);
4174        if (err)
4175                dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4176}
4177
4178/**
4179 * ufshcd_hba_execute_hce - initialize the controller
4180 * @hba: per adapter instance
4181 *
4182 * The controller resets itself and controller firmware initialization
4183 * sequence kicks off. When controller is ready it will set
4184 * the Host Controller Enable bit to 1.
4185 *
4186 * Returns 0 on success, non-zero value on failure
4187 */
4188static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4189{
4190        int retry;
4191
4192        /*
4193         * msleep of 1 and 5 used in this function might result in msleep(20),
4194         * but it was necessary to send the UFS FPGA to reset mode during
4195         * development and testing of this driver. msleep can be changed to
4196         * mdelay and retry count can be reduced based on the controller.
4197         */
4198        if (!ufshcd_is_hba_active(hba))
4199                /* change controller state to "reset state" */
4200                ufshcd_hba_stop(hba, true);
4201
4202        /* UniPro link is disabled at this point */
4203        ufshcd_set_link_off(hba);
4204
4205        ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4206
4207        /* start controller initialization sequence */
4208        ufshcd_hba_start(hba);
4209
4210        /*
4211         * To initialize a UFS host controller, the HCE bit must be set to 1.
4212         * During initialization the HCE bit value changes from 1->0->1.
4213         * When the host controller completes initialization sequence
4214         * it sets the value of HCE bit to 1. The same HCE bit is read back
4215         * to check if the controller has completed initialization sequence.
4216         * So without this delay, the value HCE = 1 set by the previous
4217         * instruction might be read back prematurely.
4218         * This delay can be changed based on the controller.
4219         */
4220        msleep(1);
4221
4222        /* wait for the host controller to complete initialization */
4223        retry = 10;
4224        while (ufshcd_is_hba_active(hba)) {
4225                if (retry) {
4226                        retry--;
4227                } else {
4228                        dev_err(hba->dev,
4229                                "Controller enable failed\n");
4230                        return -EIO;
4231                }
4232                msleep(5);
4233        }
4234
4235        /* enable UIC related interrupts */
4236        ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4237
4238        ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4239
4240        return 0;
4241}
4242
4243static int ufshcd_hba_enable(struct ufs_hba *hba)
4244{
4245        int ret;
4246
4247        if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4248                ufshcd_set_link_off(hba);
4249                ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4250
4251                /* enable UIC related interrupts */
4252                ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4253                ret = ufshcd_dme_reset(hba);
4254                if (!ret) {
4255                        ret = ufshcd_dme_enable(hba);
4256                        if (!ret)
4257                                ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4258                        if (ret)
4259                                dev_err(hba->dev,
4260                                        "Host controller enable failed with non-hce\n");
4261                }
4262        } else {
4263                ret = ufshcd_hba_execute_hce(hba);
4264        }
4265
4266        return ret;
4267}
4268static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4269{
4270        int tx_lanes, i, err = 0;
4271
4272        if (!peer)
4273                ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4274                               &tx_lanes);
4275        else
4276                ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4277                                    &tx_lanes);
4278        for (i = 0; i < tx_lanes; i++) {
4279                if (!peer)
4280                        err = ufshcd_dme_set(hba,
4281                                UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4282                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4283                                        0);
4284                else
4285                        err = ufshcd_dme_peer_set(hba,
4286                                UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4287                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4288                                        0);
4289                if (err) {
4290                        dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4291                                __func__, peer, i, err);
4292                        break;
4293                }
4294        }
4295
4296        return err;
4297}
4298
4299static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4300{
4301        return ufshcd_disable_tx_lcc(hba, true);
4302}
4303
4304/**
4305 * ufshcd_link_startup - Initialize unipro link startup
4306 * @hba: per adapter instance
4307 *
4308 * Returns 0 for success, non-zero in case of failure
4309 */
4310static int ufshcd_link_startup(struct ufs_hba *hba)
4311{
4312        int ret;
4313        int retries = DME_LINKSTARTUP_RETRIES;
4314        bool link_startup_again = false;
4315
4316        /*
4317         * If UFS device isn't active then we will have to issue link startup
4318         * 2 times to make sure the device state moves to active.
4319         */
4320        if (!ufshcd_is_ufs_dev_active(hba))
4321                link_startup_again = true;
4322
4323link_startup:
4324        do {
4325                ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4326
4327                ret = ufshcd_dme_link_startup(hba);
4328
4329                /* check if device is detected by inter-connect layer */
4330                if (!ret && !ufshcd_is_device_present(hba)) {
4331                        dev_err(hba->dev, "%s: Device not present\n", __func__);
4332                        ret = -ENXIO;
4333                        goto out;
4334                }
4335
4336                /*
4337                 * DME link lost indication is only received when link is up,
4338                 * but we can't be sure if the link is up until link startup
4339                 * succeeds. So reset the local Uni-Pro and try again.
4340                 */
4341                if (ret && ufshcd_hba_enable(hba))
4342                        goto out;
4343        } while (ret && retries--);
4344
4345        if (ret)
4346                /* failed to get the link up... give up */
4347                goto out;
4348
4349        if (link_startup_again) {
4350                link_startup_again = false;
4351                retries = DME_LINKSTARTUP_RETRIES;
4352                goto link_startup;
4353        }
4354
4355        /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4356        ufshcd_init_pwr_info(hba);
4357        ufshcd_print_pwr_info(hba);
4358
4359        if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4360                ret = ufshcd_disable_device_tx_lcc(hba);
4361                if (ret)
4362                        goto out;
4363        }
4364
4365        /* Include any host controller configuration via UIC commands */
4366        ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4367        if (ret)
4368                goto out;
4369
4370        ret = ufshcd_make_hba_operational(hba);
4371out:
4372        if (ret) {
4373                dev_err(hba->dev, "link startup failed %d\n", ret);
4374                ufshcd_print_host_state(hba);
4375                ufshcd_print_pwr_info(hba);
4376                ufshcd_print_host_regs(hba);
4377        }
4378        return ret;
4379}
4380
4381/**
4382 * ufshcd_verify_dev_init() - Verify device initialization
4383 * @hba: per-adapter instance
4384 *
4385 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4386 * device Transport Protocol (UTP) layer is ready after a reset.
4387 * If the UTP layer at the device side is not initialized, it may
4388 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4389 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4390 */
4391static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4392{
4393        int err = 0;
4394        int retries;
4395
4396        ufshcd_hold(hba, false);
4397        mutex_lock(&hba->dev_cmd.lock);
4398        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4399                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4400                                               NOP_OUT_TIMEOUT);
4401
4402                if (!err || err == -ETIMEDOUT)
4403                        break;
4404
4405                dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4406        }
4407        mutex_unlock(&hba->dev_cmd.lock);
4408        ufshcd_release(hba);
4409
4410        if (err)
4411                dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4412        return err;
4413}
4414
4415/**
4416 * ufshcd_set_queue_depth - set lun queue depth
4417 * @sdev: pointer to SCSI device
4418 *
4419 * Read bLUQueueDepth value and activate scsi tagged command
4420 * queueing. For WLUN, queue depth is set to 1. For best-effort
4421 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4422 * value that the host can queue.
4423 */
4424static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4425{
4426        int ret = 0;
4427        u8 lun_qdepth;
4428        struct ufs_hba *hba;
4429
4430        hba = shost_priv(sdev->host);
4431
4432        lun_qdepth = hba->nutrs;
4433        ret = ufshcd_read_unit_desc_param(hba,
4434                                          ufshcd_scsi_to_upiu_lun(sdev->lun),
4435                                          UNIT_DESC_PARAM_LU_Q_DEPTH,
4436                                          &lun_qdepth,
4437                                          sizeof(lun_qdepth));
4438
4439        /* Some WLUNs don't support the unit descriptor */
4440        if (ret == -EOPNOTSUPP)
4441                lun_qdepth = 1;
4442        else if (!lun_qdepth)
4443                /* eventually, we can figure out the real queue depth */
4444                lun_qdepth = hba->nutrs;
4445        else
4446                lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4447
4448        dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4449                        __func__, lun_qdepth);
4450        scsi_change_queue_depth(sdev, lun_qdepth);
4451}
4452
4453/*
4454 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4455 * @hba: per-adapter instance
4456 * @lun: UFS device lun id
4457 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4458 *
4459 * Returns 0 in case of success and the b_lu_write_protect status is returned
4460 * in the @b_lu_write_protect parameter.
4461 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4462 * Returns -EINVAL in case of invalid parameters passed to this function.
4463 */
4464static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4465                            u8 lun,
4466                            u8 *b_lu_write_protect)
4467{
4468        int ret;
4469
4470        if (!b_lu_write_protect)
4471                ret = -EINVAL;
4472        /*
4473         * According to UFS device spec, RPMB LU can't be write
4474         * protected so skip reading bLUWriteProtect parameter for
4475         * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4476         */
4477        else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4478                ret = -ENOTSUPP;
4479        else
4480                ret = ufshcd_read_unit_desc_param(hba,
4481                                          lun,
4482                                          UNIT_DESC_PARAM_LU_WR_PROTECT,
4483                                          b_lu_write_protect,
4484                                          sizeof(*b_lu_write_protect));
4485        return ret;
4486}
4487
4488/**
4489 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4490 * status
4491 * @hba: per-adapter instance
4492 * @sdev: pointer to SCSI device
4493 *
4494 */
4495static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4496                                                    struct scsi_device *sdev)
4497{
4498        if (hba->dev_info.f_power_on_wp_en &&
4499            !hba->dev_info.is_lu_power_on_wp) {
4500                u8 b_lu_write_protect;
4501
4502                if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4503                                      &b_lu_write_protect) &&
4504                    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4505                        hba->dev_info.is_lu_power_on_wp = true;
4506        }
4507}
4508
4509/**
4510 * ufshcd_slave_alloc - handle initial SCSI device configurations
4511 * @sdev: pointer to SCSI device
4512 *
4513 * Returns success
4514 */
4515static int ufshcd_slave_alloc(struct scsi_device *sdev)
4516{
4517        struct ufs_hba *hba;
4518
4519        hba = shost_priv(sdev->host);
4520
4521        /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4522        sdev->use_10_for_ms = 1;
4523
4524        /* allow SCSI layer to restart the device in case of errors */
4525        sdev->allow_restart = 1;
4526
4527        /* REPORT SUPPORTED OPERATION CODES is not supported */
4528        sdev->no_report_opcodes = 1;
4529
4530        /* WRITE_SAME command is not supported */
4531        sdev->no_write_same = 1;
4532
4533        ufshcd_set_queue_depth(sdev);
4534
4535        ufshcd_get_lu_power_on_wp_status(hba, sdev);
4536
4537        return 0;
4538}
4539
4540/**
4541 * ufshcd_change_queue_depth - change queue depth
4542 * @sdev: pointer to SCSI device
4543 * @depth: required depth to set
4544 *
4545 * Change queue depth and make sure the max. limits are not crossed.
4546 */
4547static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4548{
4549        struct ufs_hba *hba = shost_priv(sdev->host);
4550
4551        if (depth > hba->nutrs)
4552                depth = hba->nutrs;
4553        return scsi_change_queue_depth(sdev, depth);
4554}
4555
4556/**
4557 * ufshcd_slave_configure - adjust SCSI device configurations
4558 * @sdev: pointer to SCSI device
4559 */
4560static int ufshcd_slave_configure(struct scsi_device *sdev)
4561{
4562        struct request_queue *q = sdev->request_queue;
4563
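            /*
             * Keep block-layer request sizing compatible with the PRDT: pad
             * transfer lengths to PRDT_DATA_BYTE_COUNT_PAD granularity and cap
             * the segment size at PRDT_DATA_BYTE_COUNT_MAX so that each
             * segment fits in a single PRDT entry.
             */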
4564        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4565        blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4566
4567        return 0;
4568}
4569
4570/**
4571 * ufshcd_slave_destroy - remove SCSI device configurations
4572 * @sdev: pointer to SCSI device
4573 */
4574static void ufshcd_slave_destroy(struct scsi_device *sdev)
4575{
4576        struct ufs_hba *hba;
4577
4578        hba = shost_priv(sdev->host);
4579        /* Drop the reference as it won't be needed anymore */
4580        if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4581                unsigned long flags;
4582
4583                spin_lock_irqsave(hba->host->host_lock, flags);
4584                hba->sdev_ufs_device = NULL;
4585                spin_unlock_irqrestore(hba->host->host_lock, flags);
4586        }
4587}
4588
4589/**
4590 * ufshcd_task_req_compl - handle task management request completion
4591 * @hba: per adapter instance
4592 * @index: index of the completed request
4593 * @resp: task management service response
4594 *
4595 * Returns non-zero value on error, zero on success
4596 */
4597static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4598{
4599        struct utp_task_req_desc *task_req_descp;
4600        struct utp_upiu_task_rsp *task_rsp_upiup;
4601        unsigned long flags;
4602        int ocs_value;
4603        int task_result;
4604
4605        spin_lock_irqsave(hba->host->host_lock, flags);
4606
4607        /* Clear completed tasks from outstanding_tasks */
4608        __clear_bit(index, &hba->outstanding_tasks);
4609
4610        task_req_descp = hba->utmrdl_base_addr;
4611        ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4612
4613        if (ocs_value == OCS_SUCCESS) {
4614                task_rsp_upiup = (struct utp_upiu_task_rsp *)
4615                                task_req_descp[index].task_rsp_upiu;
4616                task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4617                task_result = task_result & MASK_TM_SERVICE_RESP;
4618                if (resp)
4619                        *resp = (u8)task_result;
4620        } else {
4621                dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4622                                __func__, ocs_value);
4623        }
4624        spin_unlock_irqrestore(hba->host->host_lock, flags);
4625
4626        return ocs_value;
4627}
4628
4629/**
4630 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4631 * @lrbp: pointer to local reference block of completed command
4632 * @scsi_status: SCSI command status
4633 *
4634 * Returns a value based on the SCSI command status
4635 */
4636static inline int
4637ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4638{
4639        int result = 0;
4640
4641        switch (scsi_status) {
4642        case SAM_STAT_CHECK_CONDITION:
4643                ufshcd_copy_sense_data(lrbp);
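                    /*
                     * Fall through: a CHECK CONDITION completion is still
                     * reported to the SCSI midlayer with DID_OK so that the
                     * copied sense data gets picked up.
                     */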
4644        case SAM_STAT_GOOD:
4645                result |= DID_OK << 16 |
4646                          COMMAND_COMPLETE << 8 |
4647                          scsi_status;
4648                break;
4649        case SAM_STAT_TASK_SET_FULL:
4650        case SAM_STAT_BUSY:
4651        case SAM_STAT_TASK_ABORTED:
4652                ufshcd_copy_sense_data(lrbp);
4653                result |= scsi_status;
4654                break;
4655        default:
4656                result |= DID_ERROR << 16;
4657                break;
4658        } /* end of switch */
4659
4660        return result;
4661}
4662
4663/**
4664 * ufshcd_transfer_rsp_status - Get overall status of the response
4665 * @hba: per adapter instance
4666 * @lrbp: pointer to local reference block of completed command
4667 *
4668 * Returns result of the command to notify SCSI midlayer
4669 */
4670static inline int
4671ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4672{
4673        int result = 0;
4674        int scsi_status;
4675        int ocs;
4676
4677        /* overall command status of utrd */
4678        ocs = ufshcd_get_tr_ocs(lrbp);
4679
4680        switch (ocs) {
4681        case OCS_SUCCESS:
4682                result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4683                hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4684                switch (result) {
4685                case UPIU_TRANSACTION_RESPONSE:
4686                        /*
4687                         * get the response UPIU result to extract
4688                         * the SCSI command status
4689                         */
4690                        result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4691
4692                        /*
4693                         * get the result based on SCSI status response
4694                         * to notify the SCSI midlayer of the command status
4695                         */
4696                        scsi_status = result & MASK_SCSI_STATUS;
4697                        result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4698
4699                        /*
4700                         * Currently we are only supporting BKOPs exception
4701                         * events hence we can ignore BKOPs exception event
4702                         * during power management callbacks. BKOPs exception
4703                         * event is not expected to be raised in runtime suspend
4704                         * callback as it allows the urgent bkops.
4705                         * During system suspend, we are anyway forcefully
4706                         * disabling the bkops and if urgent bkops is needed
4707                         * it will be enabled on system resume. Long term
4708                         * solution could be to abort the system suspend if
4709                         * UFS device needs urgent BKOPs.
4710                         */
4711                        if (!hba->pm_op_in_progress &&
4712                            ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4713                                schedule_work(&hba->eeh_work);
4714                        break;
4715                case UPIU_TRANSACTION_REJECT_UPIU:
4716                        /* TODO: handle Reject UPIU Response */
4717                        result = DID_ERROR << 16;
4718                        dev_err(hba->dev,
4719                                "Reject UPIU not fully implemented\n");
4720                        break;
4721                default:
4722                        dev_err(hba->dev,
4723                                "Unexpected request response code = %x\n",
4724                                result);
4725                        result = DID_ERROR << 16;
4726                        break;
4727                }
4728                break;
4729        case OCS_ABORTED:
4730                result |= DID_ABORT << 16;
4731                break;
4732        case OCS_INVALID_COMMAND_STATUS:
4733                result |= DID_REQUEUE << 16;
4734                break;
4735        case OCS_INVALID_CMD_TABLE_ATTR:
4736        case OCS_INVALID_PRDT_ATTR:
4737        case OCS_MISMATCH_DATA_BUF_SIZE:
4738        case OCS_MISMATCH_RESP_UPIU_SIZE:
4739        case OCS_PEER_COMM_FAILURE:
4740        case OCS_FATAL_ERROR:
4741        default:
4742                result |= DID_ERROR << 16;
4743                dev_err(hba->dev,
4744                                "OCS error from controller = %x for tag %d\n",
4745                                ocs, lrbp->task_tag);
4746                ufshcd_print_host_regs(hba);
4747                ufshcd_print_host_state(hba);
4748                break;
4749        } /* end of switch */
4750
4751        if (host_byte(result) != DID_OK)
4752                ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4753        return result;
4754}
4755
4756/**
4757 * ufshcd_uic_cmd_compl - handle completion of uic command
4758 * @hba: per adapter instance
4759 * @intr_status: interrupt status generated by the controller
4760 */
4761static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4762{
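            /*
             * For a completed UIC command, stash the command result in
             * argument2 and the read DME attribute value in argument3 before
             * waking up the issuer waiting on ->done.
             */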
4763        if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4764                hba->active_uic_cmd->argument2 |=
4765                        ufshcd_get_uic_cmd_result(hba);
4766                hba->active_uic_cmd->argument3 =
4767                        ufshcd_get_dme_attr_val(hba);
4768                complete(&hba->active_uic_cmd->done);
4769        }
4770
4771        if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4772                complete(hba->uic_async_done);
4773}
4774
4775/**
4776 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4777 * @hba: per adapter instance
4778 * @completed_reqs: requests to complete
4779 */
4780static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4781                                        unsigned long completed_reqs)
4782{
4783        struct ufshcd_lrb *lrbp;
4784        struct scsi_cmnd *cmd;
4785        int result;
4786        int index;
4787
4788        for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4789                lrbp = &hba->lrb[index];
4790                cmd = lrbp->cmd;
4791                if (cmd) {
4792                        ufshcd_add_command_trace(hba, index, "complete");
4793                        result = ufshcd_transfer_rsp_status(hba, lrbp);
4794                        scsi_dma_unmap(cmd);
4795                        cmd->result = result;
4796                        /* Mark completed command as NULL in LRB */
4797                        lrbp->cmd = NULL;
4798                        clear_bit_unlock(index, &hba->lrb_in_use);
4799                        /* Do not touch lrbp after scsi done */
4800                        cmd->scsi_done(cmd);
4801                        __ufshcd_release(hba);
4802                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4803                        lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4804                        if (hba->dev_cmd.complete) {
4805                                ufshcd_add_command_trace(hba, index,
4806                                                "dev_complete");
4807                                complete(hba->dev_cmd.complete);
4808                        }
4809                }
4810                if (ufshcd_is_clkscaling_supported(hba))
4811                        hba->clk_scaling.active_reqs--;
4812
4813                lrbp->compl_time_stamp = ktime_get();
4814        }
4815
4816        /* clear corresponding bits of completed commands */
4817        hba->outstanding_reqs ^= completed_reqs;
4818
4819        ufshcd_clk_scaling_update_busy(hba);
4820
4821        /* we might have freed some tags above */
4822        wake_up(&hba->dev_cmd.tag_wq);
4823}
4824
4825/**
4826 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4827 * @hba: per adapter instance
4828 */
4829static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4830{
4831        unsigned long completed_reqs;
4832        u32 tr_doorbell;
4833
4834        /* Resetting interrupt aggregation counters first and reading the
4835         * DOOR_BELL afterward allows us to handle all the completed requests.
4836         * In order to prevent other interrupts starvation the DB is read once
4837         * after reset. The down side of this solution is the possibility of
4838         * false interrupt if device completes another request after resetting
4839         * aggregation and before reading the DB.
4840         */
4841        if (ufshcd_is_intr_aggr_allowed(hba) &&
4842            !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
4843                ufshcd_reset_intr_aggr(hba);
4844
4845        tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
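            /*
             * Bits that are still set in outstanding_reqs but already cleared
             * in the doorbell register belong to completed requests, so the
             * XOR below yields the set of tags to complete (assuming the
             * doorbell bits are a subset of outstanding_reqs). For example,
             * outstanding_reqs = 0x7 with a doorbell value of 0x1 means tags
             * 1 and 2 have completed.
             */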
4846        completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4847
4848        __ufshcd_transfer_req_compl(hba, completed_reqs);
4849}
4850
4851/**
4852 * ufshcd_disable_ee - disable exception event
4853 * @hba: per-adapter instance
4854 * @mask: exception event to disable
4855 *
4856 * Disables exception event in the device so that the EVENT_ALERT
4857 * bit is not set.
4858 *
4859 * Returns zero on success, non-zero error value on failure.
4860 */
4861static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4862{
4863        int err = 0;
4864        u32 val;
4865
4866        if (!(hba->ee_ctrl_mask & mask))
4867                goto out;
4868
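            /* Write back the exception event mask with this event's bit cleared */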
4869        val = hba->ee_ctrl_mask & ~mask;
4870        val &= MASK_EE_STATUS;
4871        err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4872                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4873        if (!err)
4874                hba->ee_ctrl_mask &= ~mask;
4875out:
4876        return err;
4877}
4878
4879/**
4880 * ufshcd_enable_ee - enable exception event
4881 * @hba: per-adapter instance
4882 * @mask: exception event to enable
4883 *
4884 * Enable corresponding exception event in the device to allow
4885 * device to alert host in critical scenarios.
4886 *
4887 * Returns zero on success, non-zero error value on failure.
4888 */
4889static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4890{
4891        int err = 0;
4892        u32 val;
4893
4894        if (hba->ee_ctrl_mask & mask)
4895                goto out;
4896
4897        val = hba->ee_ctrl_mask | mask;
4898        val &= MASK_EE_STATUS;
4899        err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4900                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4901        if (!err)
4902                hba->ee_ctrl_mask |= mask;
4903out:
4904        return err;
4905}
4906
4907/**
4908 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4909 * @hba: per-adapter instance
4910 *
4911 * Allow device to manage background operations on its own. Enabling
4912 * this might lead to inconsistent latencies during normal data transfers
4913 * as the device is allowed to manage its own way of handling background
4914 * operations.
4915 *
4916 * Returns zero on success, non-zero on failure.
4917 */
4918static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4919{
4920        int err = 0;
4921
4922        if (hba->auto_bkops_enabled)
4923                goto out;
4924
4925        err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4926                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
4927        if (err) {
4928                dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4929                                __func__, err);
4930                goto out;
4931        }
4932
4933        hba->auto_bkops_enabled = true;
4934        trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4935
4936        /* No need of URGENT_BKOPS exception from the device */
4937        err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4938        if (err)
4939                dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4940                                __func__, err);
4941out:
4942        return err;
4943}
4944
4945/**
4946 * ufshcd_disable_auto_bkops - block the device from doing background operations
4947 * @hba: per-adapter instance
4948 *
4949 * Disabling background operations improves command response latency but
4950 * has the drawback of the device moving into a critical state where it is
4951 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4952 * host is idle so that BKOPS are managed effectively without any negative
4953 * impacts.
4954 *
4955 * Returns zero on success, non-zero on failure.
4956 */
4957static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4958{
4959        int err = 0;
4960
4961        if (!hba->auto_bkops_enabled)
4962                goto out;
4963
4964        /*
4965         * If host assisted BKOPs is to be enabled, make sure
4966         * urgent bkops exception is allowed.
4967         */
4968        err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4969        if (err) {
4970                dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4971                                __func__, err);
4972                goto out;
4973        }
4974
4975        err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
4976                        QUERY_FLAG_IDN_BKOPS_EN, NULL);
4977        if (err) {
4978                dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4979                                __func__, err);
4980                ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4981                goto out;
4982        }
4983
4984        hba->auto_bkops_enabled = false;
4985        trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
4986out:
4987        return err;
4988}
4989
4990/**
4991 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
4992 * @hba: per adapter instance
4993 *
4994 * After a device reset the device may toggle the BKOPS_EN flag
4995 * to its default value. The s/w tracking variables should be updated
4996 * as well. This function changes the auto-bkops state based on
4997 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
4998 */
4999static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5000{
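            /*
             * Flip the cached auto_bkops_enabled state to the opposite of the
             * desired value before calling the helper below, so that the
             * helper does not short-circuit on its early check and actually
             * re-issues the query to bring the device flag back in sync.
             */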
5001        if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5002                hba->auto_bkops_enabled = false;
5003                hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5004                ufshcd_enable_auto_bkops(hba);
5005        } else {
5006                hba->auto_bkops_enabled = true;
5007                hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5008                ufshcd_disable_auto_bkops(hba);
5009        }
5010}
5011
5012static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5013{
5014        return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5015                        QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5016}
5017
5018/**
5019 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5020 * @hba: per-adapter instance
5021 * @status: bkops_status value
5022 *
5023 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5024 * flag in the device to permit background operations if the device
5025 * bkops_status is greater than or equal to the "status" argument passed to
5026 * this function; disable it otherwise.
5027 *
5028 * Returns 0 for success, non-zero in case of failure.
5029 *
5030 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5031 * to know whether auto bkops is enabled or disabled after this function
5032 * returns control to it.
5033 */
5034static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5035                             enum bkops_status status)
5036{
5037        int err;
5038        u32 curr_status = 0;
5039
5040        err = ufshcd_get_bkops_status(hba, &curr_status);
5041        if (err) {
5042                dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5043                                __func__, err);
5044                goto out;
5045        } else if (curr_status > BKOPS_STATUS_MAX) {
5046                dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5047                                __func__, curr_status);
5048                err = -EINVAL;
5049                goto out;
5050        }
5051
5052        if (curr_status >= status)
5053                err = ufshcd_enable_auto_bkops(hba);
5054        else
5055                err = ufshcd_disable_auto_bkops(hba);
5056out:
5057        return err;
5058}
5059
5060/**
5061 * ufshcd_urgent_bkops - handle urgent bkops exception event
5062 * @hba: per-adapter instance
5063 *
5064 * Enable fBackgroundOpsEn flag in the device to permit background
5065 * operations.
5066 *
5067 * Returns 0 if BKOPS is enabled, 1 if bkops is not enabled,
5068 * and a negative error value for any other failure.
5069 */
5070static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5071{
5072        return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5073}
5074
5075static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5076{
5077        return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5078                        QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5079}
5080
5081static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5082{
5083        int err;
5084        u32 curr_status = 0;
5085
5086        if (hba->is_urgent_bkops_lvl_checked)
5087                goto enable_auto_bkops;
5088
5089        err = ufshcd_get_bkops_status(hba, &curr_status);
5090        if (err) {
5091                dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5092                                __func__, err);
5093                goto out;
5094        }
5095
5096        /*
5097         * We are seeing that some devices raise the urgent bkops
5098         * exception event even when the BKOPS status doesn't indicate performance
5099         * impacted or critical. Handle these devices by determining their urgent
5100         * bkops status at runtime.
5101         */
5102        if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5103                dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5104                                __func__, curr_status);
5105                /* update the current status as the urgent bkops level */
5106                hba->urgent_bkops_lvl = curr_status;
5107                hba->is_urgent_bkops_lvl_checked = true;
5108        }
5109
5110enable_auto_bkops:
5111        err = ufshcd_enable_auto_bkops(hba);
5112out:
5113        if (err < 0)
5114                dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5115                                __func__, err);
5116}
5117
5118/**
5119 * ufshcd_exception_event_handler - handle exceptions raised by device
5120 * @work: pointer to work data
5121 *
5122 * Read bExceptionEventStatus attribute from the device and handle the
5123 * exception event accordingly.
5124 */
5125static void ufshcd_exception_event_handler(struct work_struct *work)
5126{
5127        struct ufs_hba *hba;
5128        int err;
5129        u32 status = 0;
5130        hba = container_of(work, struct ufs_hba, eeh_work);
5131
5132        pm_runtime_get_sync(hba->dev);
5133        scsi_block_requests(hba->host);
5134        err = ufshcd_get_ee_status(hba, &status);
5135        if (err) {
5136                dev_err(hba->dev, "%s: failed to get exception status %d\n",
5137                                __func__, err);
5138                goto out;
5139        }
5140
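            /* Only handle exception events that the host explicitly enabled */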
5141        status &= hba->ee_ctrl_mask;
5142
5143        if (status & MASK_EE_URGENT_BKOPS)
5144                ufshcd_bkops_exception_event_handler(hba);
5145
5146out:
5147        scsi_unblock_requests(hba->host);
5148        pm_runtime_put_sync(hba->dev);
5149        return;
5150}
5151
5152/* Complete requests that have door-bell cleared */
5153static void ufshcd_complete_requests(struct ufs_hba *hba)
5154{
5155        ufshcd_transfer_req_compl(hba);
5156        ufshcd_tmc_handler(hba);
5157}
5158
5159/**
5160 * ufshcd_quirk_dl_nac_errors - Check whether error handling is required
5161 *                              to recover from the DL NAC errors.
5162 * @hba: per-adapter instance
5163 *
5164 * Returns true if error handling is required, false otherwise
5165 */
5166static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5167{
5168        unsigned long flags;
5169        bool err_handling = true;
5170
5171        spin_lock_irqsave(hba->host->host_lock, flags);
5172        /*
5173         * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5174         * device fatal error and/or DL NAC & REPLAY timeout errors.
5175         */
5176        if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5177                goto out;
5178
5179        if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5180            ((hba->saved_err & UIC_ERROR) &&
5181             (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5182                goto out;
5183
5184        if ((hba->saved_err & UIC_ERROR) &&
5185            (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5186                int err;
5187                /*
5188                 * wait for 50ms to see if we can get any other errors or not.
5189                 */
5190                spin_unlock_irqrestore(hba->host->host_lock, flags);
5191                msleep(50);
5192                spin_lock_irqsave(hba->host->host_lock, flags);
5193
5194                /*
5195                 * Now check if we have received any other severe errors besides
5196                 * the DL NAC error.
5197                 */
5198                if ((hba->saved_err & INT_FATAL_ERRORS) ||
5199                    ((hba->saved_err & UIC_ERROR) &&
5200                    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5201                        goto out;
5202
5203                /*
5204                 * As DL NAC is the only error received so far, send out NOP
5205                 * command to confirm if link is still active or not.
5206                 *   - If we don't get any response then do error recovery.
5207                 *   - If we get response then clear the DL NAC error bit.
5208                 */
5209
5210                spin_unlock_irqrestore(hba->host->host_lock, flags);
5211                err = ufshcd_verify_dev_init(hba);
5212                spin_lock_irqsave(hba->host->host_lock, flags);
5213
5214                if (err)
5215                        goto out;
5216
5217                /* Link seems to be alive hence ignore the DL NAC errors */
5218                if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5219                        hba->saved_err &= ~UIC_ERROR;
5220                /* clear NAC error */
5221                hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5222                if (!hba->saved_uic_err) {
5223                        err_handling = false;
5224                        goto out;
5225                }
5226        }
5227out:
5228        spin_unlock_irqrestore(hba->host->host_lock, flags);
5229        return err_handling;
5230}
5231
5232/**
5233 * ufshcd_err_handler - handle UFS errors that require s/w attention
5234 * @work: pointer to work structure
5235 */
5236static void ufshcd_err_handler(struct work_struct *work)
5237{
5238        struct ufs_hba *hba;
5239        unsigned long flags;
5240        u32 err_xfer = 0;
5241        u32 err_tm = 0;
5242        int err = 0;
5243        int tag;
5244        bool needs_reset = false;
5245
5246        hba = container_of(work, struct ufs_hba, eh_work);
5247
5248        pm_runtime_get_sync(hba->dev);
5249        ufshcd_hold(hba, false);
5250
5251        spin_lock_irqsave(hba->host->host_lock, flags);
5252        if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5253                goto out;
5254
5255        hba->ufshcd_state = UFSHCD_STATE_RESET;
5256        ufshcd_set_eh_in_progress(hba);
5257
5258        /* Complete requests that have door-bell cleared by h/w */
5259        ufshcd_complete_requests(hba);
5260
5261        if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5262                bool ret;
5263
5264                spin_unlock_irqrestore(hba->host->host_lock, flags);
5265                /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5266                ret = ufshcd_quirk_dl_nac_errors(hba);
5267                spin_lock_irqsave(hba->host->host_lock, flags);
5268                if (!ret)
5269                        goto skip_err_handling;
5270        }
5271        if ((hba->saved_err & INT_FATAL_ERRORS) ||
5272            ((hba->saved_err & UIC_ERROR) &&
5273            (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5274                                   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5275                                   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5276                needs_reset = true;
5277
5278        /*
5279         * If a host reset is required then skip forcefully clearing the
5280         * pending transfers because they will automatically get
5281         * cleared after link startup.
5282         */
5283        if (needs_reset)
5284                goto skip_pending_xfer_clear;
5285
5286        /* release lock as clear command might sleep */
5287        spin_unlock_irqrestore(hba->host->host_lock, flags);
5288        /* Clear pending transfer requests */
5289        for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5290                if (ufshcd_clear_cmd(hba, tag)) {
5291                        err_xfer = true;
5292                        goto lock_skip_pending_xfer_clear;
5293                }
5294        }
5295
5296        /* Clear pending task management requests */
5297        for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5298                if (ufshcd_clear_tm_cmd(hba, tag)) {
5299                        err_tm = true;
5300                        goto lock_skip_pending_xfer_clear;
5301                }
5302        }
5303
5304lock_skip_pending_xfer_clear:
5305        spin_lock_irqsave(hba->host->host_lock, flags);
5306
5307        /* Complete the requests that are cleared by s/w */
5308        ufshcd_complete_requests(hba);
5309
5310        if (err_xfer || err_tm)
5311                needs_reset = true;
5312
5313skip_pending_xfer_clear:
5314        /* Fatal errors need reset */
5315        if (needs_reset) {
5316                unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5317
5318                /*
5319                 * ufshcd_reset_and_restore() does the link reinitialization
5320                 * which will need at least one empty doorbell slot to send the
5321                 * device management commands (NOP and query commands).
5322                 * If no slot is empty at this moment then forcefully free up
5323                 * the last slot.
5324                 */
5325                if (hba->outstanding_reqs == max_doorbells)
5326                        __ufshcd_transfer_req_compl(hba,
5327                                                    (1UL << (hba->nutrs - 1)));
5328
5329                spin_unlock_irqrestore(hba->host->host_lock, flags);
5330                err = ufshcd_reset_and_restore(hba);
5331                spin_lock_irqsave(hba->host->host_lock, flags);
5332                if (err) {
5333                        dev_err(hba->dev, "%s: reset and restore failed\n",
5334                                        __func__);
5335                        hba->ufshcd_state = UFSHCD_STATE_ERROR;
5336                }
5337                /*
5338                 * Inform the scsi mid-layer that we did reset and allow it to handle
5339                 * Unit Attention properly.
5340                 */
5341                scsi_report_bus_reset(hba->host, 0);
5342                hba->saved_err = 0;
5343                hba->saved_uic_err = 0;
5344        }
5345
5346skip_err_handling:
5347        if (!needs_reset) {
5348                hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5349                if (hba->saved_err || hba->saved_uic_err)
5350                        dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5351                            __func__, hba->saved_err, hba->saved_uic_err);
5352        }
5353
5354        ufshcd_clear_eh_in_progress(hba);
5355
5356out:
5357        spin_unlock_irqrestore(hba->host->host_lock, flags);
5358        ufshcd_scsi_unblock_requests(hba);
5359        ufshcd_release(hba);
5360        pm_runtime_put_sync(hba->dev);
5361}
5362
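    /*
     * Record a UIC error register value together with a timestamp in a
     * circular history buffer of UIC_ERR_REG_HIST_LENGTH entries for later
     * debugging.
     */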
5363static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5364                u32 reg)
5365{
5366        reg_hist->reg[reg_hist->pos] = reg;
5367        reg_hist->tstamp[reg_hist->pos] = ktime_get();
5368        reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5369}
5370
5371/**
5372 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5373 * @hba: per-adapter instance
5374 */
5375static void ufshcd_update_uic_error(struct ufs_hba *hba)
5376{
5377        u32 reg;
5378
5379        /* PHY layer lane error */
5380        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5381        /* Ignore LINERESET indication, as this is not an error */
5382        if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5383                        (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5384                /*
5385                 * To know whether this error is fatal or not, DB timeout
5386                 * must be checked but this error is handled separately.
5387                 */
5388                dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5389                ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5390        }
5391
5392        /* PA_INIT_ERROR is fatal and needs UIC reset */
5393        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5394        if (reg)
5395                ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5396
5397        if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5398                hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5399        else if (hba->dev_quirks &
5400                   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5401                if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5402                        hba->uic_error |=
5403                                UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5404                else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5405                        hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5406        }
5407
5408        /* UIC NL/TL/DME errors need a software retry */
5409        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5410        if (reg) {
5411                ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5412                hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5413        }
5414
5415        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5416        if (reg) {
5417                ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5418                hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5419        }
5420
5421        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5422        if (reg) {
5423                ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5424                hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5425        }
5426
5427        dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5428                        __func__, hba->uic_error);
5429}
5430
5431/**
5432 * ufshcd_check_errors - Check for errors that need s/w attention
5433 * @hba: per-adapter instance
5434 */
5435static void ufshcd_check_errors(struct ufs_hba *hba)
5436{
5437        bool queue_eh_work = false;
5438
5439        if (hba->errors & INT_FATAL_ERRORS)
5440                queue_eh_work = true;
5441
5442        if (hba->errors & UIC_ERROR) {
5443                hba->uic_error = 0;
5444                ufshcd_update_uic_error(hba);
5445                if (hba->uic_error)
5446                        queue_eh_work = true;
5447        }
5448
5449        if (queue_eh_work) {
5450                /*
5451                 * update the transfer error masks to sticky bits, let's do this
5452                 * irrespective of current ufshcd_state.
5453                 */
5454                hba->saved_err |= hba->errors;
5455                hba->saved_uic_err |= hba->uic_error;
5456
5457                /* handle fatal errors only when link is functional */
5458                if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5459                        /* block commands from scsi mid-layer */
5460                        ufshcd_scsi_block_requests(hba);
5461
5462                        hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5463
5464                        /* dump controller state before resetting */
5465                        if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5466                                bool pr_prdt = !!(hba->saved_err &
5467                                                SYSTEM_BUS_FATAL_ERROR);
5468
5469                                dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5470                                        __func__, hba->saved_err,
5471                                        hba->saved_uic_err);
5472
5473                                ufshcd_print_host_regs(hba);
5474                                ufshcd_print_pwr_info(hba);
5475                                ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5476                                ufshcd_print_trs(hba, hba->outstanding_reqs,
5477                                                        pr_prdt);
5478                        }
5479                        schedule_work(&hba->eh_work);
5480                }
5481        }
5482        /*
5483         * if (!queue_eh_work) -
5484         * Other errors are either non-fatal where host recovers
5485         * itself without s/w intervention or errors that will be
5486         * handled by the SCSI core layer.
5487         */
5488}
5489
5490/**
5491 * ufshcd_tmc_handler - handle task management function completion
5492 * @hba: per adapter instance
5493 */
5494static void ufshcd_tmc_handler(struct ufs_hba *hba)
5495{
5496        u32 tm_doorbell;
5497
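            /*
             * TM slots whose doorbell bit the controller has cleared but that
             * are still marked outstanding have completed; the XOR below
             * computes that set and the waiters on tm_wq are woken up.
             */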
5498        tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5499        hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5500        wake_up(&hba->tm_wq);
5501}
5502
5503/**
5504 * ufshcd_sl_intr - Interrupt service routine
5505 * @hba: per adapter instance
5506 * @intr_status: contains interrupts generated by the controller
5507 */
5508static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5509{
5510        hba->errors = UFSHCD_ERROR_MASK & intr_status;
5511        if (hba->errors)
5512                ufshcd_check_errors(hba);
5513
5514        if (intr_status & UFSHCD_UIC_MASK)
5515                ufshcd_uic_cmd_compl(hba, intr_status);
5516
5517        if (intr_status & UTP_TASK_REQ_COMPL)
5518                ufshcd_tmc_handler(hba);
5519
5520        if (intr_status & UTP_TRANSFER_REQ_COMPL)
5521                ufshcd_transfer_req_compl(hba);
5522}
5523
5524/**
5525 * ufshcd_intr - Main interrupt service routine
5526 * @irq: irq number
5527 * @__hba: pointer to adapter instance
5528 *
5529 * Returns IRQ_HANDLED - If interrupt is valid
5530 *              IRQ_NONE - If invalid interrupt
5531 */
5532static irqreturn_t ufshcd_intr(int irq, void *__hba)
5533{
5534        u32 intr_status, enabled_intr_status;
5535        irqreturn_t retval = IRQ_NONE;
5536        struct ufs_hba *hba = __hba;
5537        int retries = hba->nutrs;
5538
5539        spin_lock(hba->host->host_lock);
5540        intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5541
5542        /*
5543         * There could be a maximum of hba->nutrs requests in flight. In the
5544         * worst case, if the requests finish one by one after the interrupt
5545         * status is read, make sure we handle them by checking the interrupt
5546         * status again in a loop until all of them are processed before returning.
5547         */
5548        do {
5549                enabled_intr_status =
5550                        intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5551                if (intr_status)
5552                        ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5553                if (enabled_intr_status) {
5554                        ufshcd_sl_intr(hba, enabled_intr_status);
5555                        retval = IRQ_HANDLED;
5556                }
5557
5558                intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5559        } while (intr_status && --retries);
5560
5561        spin_unlock(hba->host->host_lock);
5562        return retval;
5563}
5564
5565static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5566{
5567        int err = 0;
5568        u32 mask = 1 << tag;
5569        unsigned long flags;
5570
5571        if (!test_bit(tag, &hba->outstanding_tasks))
5572                goto out;
5573
5574        spin_lock_irqsave(hba->host->host_lock, flags);
5575        ufshcd_utmrl_clear(hba, tag);
5576        spin_unlock_irqrestore(hba->host->host_lock, flags);
5577
5578        /* poll for max. 1 sec to clear door bell register by h/w */
5579        err = ufshcd_wait_for_register(hba,
5580                        REG_UTP_TASK_REQ_DOOR_BELL,
5581                        mask, 0, 1000, 1000, true);
5582out:
5583        return err;
5584}
5585
5586/**
5587 * ufshcd_issue_tm_cmd - issues task management commands to controller
5588 * @hba: per adapter instance
5589 * @lun_id: LUN ID to which TM command is sent
5590 * @task_id: task ID to which the TM command is applicable
5591 * @tm_function: task management function opcode
5592 * @tm_response: task management service response return value
5593 *
5594 * Returns non-zero value on error, zero on success.
5595 */
5596static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5597                u8 tm_function, u8 *tm_response)
5598{
5599        struct utp_task_req_desc *task_req_descp;
5600        struct utp_upiu_task_req *task_req_upiup;
5601        struct Scsi_Host *host;
5602        unsigned long flags;
5603        int free_slot;
5604        int err;
5605        int task_tag;
5606
5607        host = hba->host;
5608
5609        /*
5610         * Get free slot, sleep if slots are unavailable.
5611         * Even though we use wait_event() which sleeps indefinitely,
5612         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5613         */
5614        wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5615        ufshcd_hold(hba, false);
5616
5617        spin_lock_irqsave(host->host_lock, flags);
5618        task_req_descp = hba->utmrdl_base_addr;
5619        task_req_descp += free_slot;
5620
5621        /* Configure task request descriptor */
5622        task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5623        task_req_descp->header.dword_2 =
5624                        cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5625
5626        /* Configure task request UPIU */
5627        task_req_upiup =
5628                (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
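            /*
             * TM request tags start at nutrs, keeping them distinct from the
             * transfer request tags (0 to nutrs - 1) used for SCSI and device
             * management commands.
             */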
5629        task_tag = hba->nutrs + free_slot;
5630        task_req_upiup->header.dword_0 =
5631                UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5632                                              lun_id, task_tag);
5633        task_req_upiup->header.dword_1 =
5634                UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5635        /*
5636         * The host shall provide the same value for LUN field in the basic
5637         * header and for Input Parameter.
5638         */
5639        task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5640        task_req_upiup->input_param2 = cpu_to_be32(task_id);
5641
5642        ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5643
5644        /* send command to the controller */
5645        __set_bit(free_slot, &hba->outstanding_tasks);
5646
5647        /* Make sure descriptors are ready before ringing the task doorbell */
5648        wmb();
5649
5650        ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5651        /* Make sure that doorbell is committed immediately */
5652        wmb();
5653
5654        spin_unlock_irqrestore(host->host_lock, flags);
5655
5656        ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5657
5658        /* wait until the task management command is completed */
5659        err = wait_event_timeout(hba->tm_wq,
5660                        test_bit(free_slot, &hba->tm_condition),
5661                        msecs_to_jiffies(TM_CMD_TIMEOUT));
5662        if (!err) {
5663                ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5664                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5665                                __func__, tm_function);
5666                if (ufshcd_clear_tm_cmd(hba, free_slot))
5667                        dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5668                                        __func__, free_slot);
5669                err = -ETIMEDOUT;
5670        } else {
5671                err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5672                ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5673        }
5674
5675        clear_bit(free_slot, &hba->tm_condition);
5676        ufshcd_put_tm_slot(hba, free_slot);
5677        wake_up(&hba->tm_tag_wq);
5678
5679        ufshcd_release(hba);
5680        return err;
5681}
5682
5683/**
5684 * ufshcd_eh_device_reset_handler - device reset handler registered to
5685 *                                    scsi layer.
5686 * @cmd: SCSI command pointer
5687 *
5688 * Returns SUCCESS/FAILED
5689 */
5690static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5691{
5692        struct Scsi_Host *host;
5693        struct ufs_hba *hba;
5694        unsigned int tag;
5695        u32 pos;
5696        int err;
5697        u8 resp = 0xF;
5698        struct ufshcd_lrb *lrbp;
5699        unsigned long flags;
5700
5701        host = cmd->device->host;
5702        hba = shost_priv(host);
5703        tag = cmd->request->tag;
5704
5705        lrbp = &hba->lrb[tag];
5706        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5707        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5708                if (!err)
5709                        err = resp;
5710                goto out;
5711        }
5712
5713        /* clear the commands that were pending for corresponding LUN */
5714        for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5715                if (hba->lrb[pos].lun == lrbp->lun) {
5716                        err = ufshcd_clear_cmd(hba, pos);
5717                        if (err)
5718                                break;
5719                }
5720        }
5721        spin_lock_irqsave(host->host_lock, flags);
5722        ufshcd_transfer_req_compl(hba);
5723        spin_unlock_irqrestore(host->host_lock, flags);
5724
5725out:
5726        hba->req_abort_count = 0;
5727        if (!err) {
5728                err = SUCCESS;
5729        } else {
5730                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5731                err = FAILED;
5732        }
5733        return err;
5734}
5735
5736static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5737{
5738        struct ufshcd_lrb *lrbp;
5739        int tag;
5740
5741        for_each_set_bit(tag, &bitmap, hba->nutrs) {
5742                lrbp = &hba->lrb[tag];
5743                lrbp->req_abort_skip = true;
5744        }
5745}
5746
5747/**
5748 * ufshcd_abort - abort a specific command
5749 * @cmd: SCSI command pointer
5750 *
5751 * Abort the pending command in device by sending UFS_ABORT_TASK task management
5752 * command, and in the host controller by clearing the door-bell register. There
5753 * can be a race between the controller sending the command to the device and the
5754 * abort being issued. To avoid that, first issue UFS_QUERY_TASK to check if the
5755 * command has really been issued and only then try to abort it.
5756 *
5757 * Returns SUCCESS/FAILED
5758 */
5759static int ufshcd_abort(struct scsi_cmnd *cmd)
5760{
5761        struct Scsi_Host *host;
5762        struct ufs_hba *hba;
5763        unsigned long flags;
5764        unsigned int tag;
5765        int err = 0;
5766        int poll_cnt;
5767        u8 resp = 0xF;
5768        struct ufshcd_lrb *lrbp;
5769        u32 reg;
5770
5771        host = cmd->device->host;
5772        hba = shost_priv(host);
5773        tag = cmd->request->tag;
5774        lrbp = &hba->lrb[tag];
5775        if (!ufshcd_valid_tag(hba, tag)) {
5776                dev_err(hba->dev,
5777                        "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5778                        __func__, tag, cmd, cmd->request);
5779                BUG();
5780        }
5781
5782        /*
5783         * Task abort to the device W-LUN is illegal. When this command
5784         * fails, due to the spec violation, the next scsi err handling step
5785         * will be to send an LU reset which, again, is a spec violation.
5786         * To avoid these unnecessary/illegal steps we skip to the last error
5787         * handling stage: reset and restore.
5788         */
5789        if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5790                return ufshcd_eh_host_reset_handler(cmd);
5791
5792        ufshcd_hold(hba, false);
5793        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5794        /* If command is already aborted/completed, return SUCCESS */
5795        if (!(test_bit(tag, &hba->outstanding_reqs))) {
5796                dev_err(hba->dev,
5797                        "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5798                        __func__, tag, hba->outstanding_reqs, reg);
5799                goto out;
5800        }
5801
5802        if (!(reg & (1 << tag))) {
5803                dev_err(hba->dev,
5804                "%s: cmd was completed, but without a notifying intr, tag = %d",
5805                __func__, tag);
5806        }
5807
5808        /* Print Transfer Request of aborted task */
5809        dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5810
5811        /*
5812         * Print detailed info about aborted request.
5813         * As more than one request might get aborted at the same time,
5814         * print full information only for the first aborted request in order
5815         * to reduce repeated printouts. For other aborted requests only print
5816         * basic details.
5817         */
5818        scsi_print_command(hba->lrb[tag].cmd);
5819        if (!hba->req_abort_count) {
5820                ufshcd_print_host_regs(hba);
5821                ufshcd_print_host_state(hba);
5822                ufshcd_print_pwr_info(hba);
5823                ufshcd_print_trs(hba, 1 << tag, true);
5824        } else {
5825                ufshcd_print_trs(hba, 1 << tag, false);
5826        }
5827        hba->req_abort_count++;
5828
5829        /* Skip task abort in case previous aborts failed and report failure */
5830        if (lrbp->req_abort_skip) {
5831                err = -EIO;
5832                goto out;
5833        }
5834
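            /*
             * Poll the device with UFS_QUERY_TASK (up to 100 attempts, sleeping
             * 100-200us between attempts) until the command is confirmed to be
             * pending in the device, is seen to have already cleared from the
             * doorbell, or an error is returned.
             */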
5835        for (poll_cnt = 100; poll_cnt; poll_cnt--) {
5836                err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5837                                UFS_QUERY_TASK, &resp);
5838                if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
5839                        /* cmd pending in the device */
5840                        dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5841                                __func__, tag);
5842                        break;
5843                } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5844                        /*
5845                         * cmd not pending in the device, check if it is
5846                         * in transition.
5847                         */
5848                        dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5849                                __func__, tag);
5850                        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5851                        if (reg & (1 << tag)) {
5852                                /* sleep for max. 200us to stabilize */
5853                                usleep_range(100, 200);
5854                                continue;
5855                        }
5856                        /* command completed already */
5857                        dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5858                                __func__, tag);
5859                        goto out;
5860                } else {
5861                        dev_err(hba->dev,
5862                                "%s: no response from device. tag = %d, err %d\n",
5863                                __func__, tag, err);
5864                        if (!err)
5865                                err = resp; /* service response error */
5866                        goto out;
5867                }
5868        }
5869
5870        if (!poll_cnt) {
5871                err = -EBUSY;
5872                goto out;
5873        }
5874
5875        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5876                        UFS_ABORT_TASK, &resp);
5877        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5878                if (!err) {
5879                        err = resp; /* service response error */
5880                        dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
5881                                __func__, tag, err);
5882                }
5883                goto out;
5884        }
5885
5886        err = ufshcd_clear_cmd(hba, tag);
5887        if (err) {
5888                dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
5889                        __func__, tag, err);
5890                goto out;
5891        }
5892
5893        scsi_dma_unmap(cmd);
5894
5895        spin_lock_irqsave(host->host_lock, flags);
5896        ufshcd_outstanding_req_clear(hba, tag);
5897        hba->lrb[tag].cmd = NULL;
5898        spin_unlock_irqrestore(host->host_lock, flags);
5899
5900        clear_bit_unlock(tag, &hba->lrb_in_use);
5901        wake_up(&hba->dev_cmd.tag_wq);
5902
5903out:
5904        if (!err) {
5905                err = SUCCESS;
5906        } else {
5907                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5908                ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
5909                err = FAILED;
5910        }
5911
5912        /*
5913         * This ufshcd_release() corresponds to the original scsi cmd that got
5914         * aborted here (as we won't get any IRQ for it).
5915         */
5916        ufshcd_release(hba);
5917        return err;
5918}
5919
5920/**
5921 * ufshcd_host_reset_and_restore - reset and restore host controller
5922 * @hba: per-adapter instance
5923 *
5924 * Note that host controller reset may issue DME_RESET to the
5925 * local and remote (device) UniPro stacks, in which case the
5926 * attributes are reset to their default state.
5927 *
5928 * Returns zero on success, non-zero on failure
5929 */
5930static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
5931{
5932        int err;
5933        unsigned long flags;
5934
5935        /* Reset the host controller */
5936        spin_lock_irqsave(hba->host->host_lock, flags);
5937        ufshcd_hba_stop(hba, false);
5938        spin_unlock_irqrestore(hba->host->host_lock, flags);
5939
5940        /* scale up clocks to max frequency before full reinitialization */
5941        ufshcd_scale_clks(hba, true);
5942
5943        err = ufshcd_hba_enable(hba);
5944        if (err)
5945                goto out;
5946
5947        /* Establish the link again and restore the device */
5948        err = ufshcd_probe_hba(hba);
5949
5950        if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
5951                err = -EIO;
5952out:
5953        if (err)
5954                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
5955
5956        return err;
5957}
5958
5959/**
5960 * ufshcd_reset_and_restore - reset and re-initialize host/device
5961 * @hba: per-adapter instance
5962 *
5963 * Reset and recover device, host and re-establish link. This
5964 * is helpful to recover the communication in fatal error conditions.
5965 *
5966 * Returns zero on success, non-zero on failure
5967 */
5968static int ufshcd_reset_and_restore(struct ufs_hba *hba)
5969{
5970        int err = 0;
5971        unsigned long flags;
5972        int retries = MAX_HOST_RESET_RETRIES;
5973
5974        do {
5975                err = ufshcd_host_reset_and_restore(hba);
5976        } while (err && --retries);
5977
5978        /*
5979         * After reset the door-bell might be cleared; complete any
5980         * outstanding requests in software here.
5981         */
5982        spin_lock_irqsave(hba->host->host_lock, flags);
5983        ufshcd_transfer_req_compl(hba);
5984        ufshcd_tmc_handler(hba);
5985        spin_unlock_irqrestore(hba->host->host_lock, flags);
5986
5987        return err;
5988}
5989
5990/**
5991 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
5992 * @cmd: SCSI command pointer
5993 *
5994 * Returns SUCCESS/FAILED
5995 */
5996static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
5997{
5998        int err;
5999        unsigned long flags;
6000        struct ufs_hba *hba;
6001
6002        hba = shost_priv(cmd->device->host);
6003
6004        ufshcd_hold(hba, false);
6005        /*
6006         * Check if there is any race with fatal error handling.
6007         * If so, wait for it to complete. Even though fatal error
6008         * handling does reset and restore in some cases, don't assume
6009         * anything about its outcome; we are only avoiding the race here.
6010         */
6011        do {
6012                spin_lock_irqsave(hba->host->host_lock, flags);
6013                if (!(work_pending(&hba->eh_work) ||
6014                            hba->ufshcd_state == UFSHCD_STATE_RESET ||
6015                            hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6016                        break;
6017                spin_unlock_irqrestore(hba->host->host_lock, flags);
6018                dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6019                flush_work(&hba->eh_work);
6020        } while (1);
6021
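            /*
             * Note: the wait loop above breaks out with host_lock still held;
             * it is released only after the reset state and the EH-in-progress
             * flag have been set below.
             */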
6022        hba->ufshcd_state = UFSHCD_STATE_RESET;
6023        ufshcd_set_eh_in_progress(hba);
6024        spin_unlock_irqrestore(hba->host->host_lock, flags);
6025
6026        err = ufshcd_reset_and_restore(hba);
6027
6028        spin_lock_irqsave(hba->host->host_lock, flags);
6029        if (!err) {
6030                err = SUCCESS;
6031                hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6032        } else {
6033                err = FAILED;
6034                hba->ufshcd_state = UFSHCD_STATE_ERROR;
6035        }
6036        ufshcd_clear_eh_in_progress(hba);
6037        spin_unlock_irqrestore(hba->host->host_lock, flags);
6038
6039        ufshcd_release(hba);
6040        return err;
6041}
6042
6043/**
6044 * ufshcd_get_max_icc_level - calculate the ICC level
6045 * @sup_curr_uA: max. current supported by the regulator
6046 * @start_scan: row at the desc table to start scan from
6047 * @buff: power descriptor buffer
6048 *
6049 * Returns calculated max ICC level for specific regulator
6050 */
6051static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6052{
6053        int i;
6054        int curr_uA;
6055        u16 data;
6056        u16 unit;
6057
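            /*
             * Scan the ICC level entries from the highest requested level down
             * to level 0. Each 16-bit entry encodes a unit and a value; the
             * value is normalized to microamps before being compared against
             * the regulator's supported current.
             */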
6058        for (i = start_scan; i >= 0; i--) {
6059                data = be16_to_cpup((__be16 *)&buff[2 * i]);
6060                unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6061                                                ATTR_ICC_LVL_UNIT_OFFSET;
6062                curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6063                switch (unit) {
6064                case UFSHCD_NANO_AMP:
6065                        curr_uA = curr_uA / 1000;
6066                        break;
6067                case UFSHCD_MILI_AMP:
6068                        curr_uA = curr_uA * 1000;
6069                        break;
6070                case UFSHCD_AMP:
6071                        curr_uA = curr_uA * 1000 * 1000;
6072                        break;
6073                case UFSHCD_MICRO_AMP:
6074                default:
6075                        break;
6076                }
6077                if (sup_curr_uA >= curr_uA)
6078                        break;
6079        }
6080        if (i < 0) {
6081                i = 0;
6082                pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6083        }
6084
6085        return (u32)i;
6086}
6087
6088/**
6089 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
6090 * ICC level. In case the regulators are not initialized, 0 is returned.
6091 * @hba: per-adapter instance
6092 * @desc_buf: power descriptor buffer to extract ICC levels from.
6093 * @len: length of desc_buf
6094 *
6095 * Returns calculated ICC level
6096 */
6097static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6098                                                        u8 *desc_buf, int len)
6099{
6100        u32 icc_level = 0;
6101
6102        if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6103                                                !hba->vreg_info.vccq2) {
6104                dev_err(hba->dev,
6105                        "%s: Regulator capability was not set, actvIccLevel=%d",
6106                                                        __func__, icc_level);
6107                goto out;
6108        }
6109
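            /*
             * Each rail narrows the result further: the level found for VCC
             * seeds the scan for VCCQ, and that result in turn seeds the scan
             * for VCCQ2, so the final level is supported by all three rails.
             */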
6110        if (hba->vreg_info.vcc)
6111                icc_level = ufshcd_get_max_icc_level(
6112                                hba->vreg_info.vcc->max_uA,
6113                                POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6114                                &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6115
6116        if (hba->vreg_info.vccq)
6117                icc_level = ufshcd_get_max_icc_level(
6118                                hba->vreg_info.vccq->max_uA,
6119                                icc_level,
6120                                &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6121
6122        if (hba->vreg_info.vccq2)
6123                icc_level = ufshcd_get_max_icc_level(
6124                                hba->vreg_info.vccq2->max_uA,
6125                                icc_level,
6126                                &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6127out:
6128        return icc_level;
6129}
6130
6131static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6132{
6133        int ret;
6134        int buff_len = hba->desc_size.pwr_desc;
6135        u8 *desc_buf;
6136
6137        desc_buf = kmalloc(buff_len, GFP_KERNEL);
6138        if (!desc_buf)
6139                return;
6140
6141        ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6142        if (ret) {
6143                dev_err(hba->dev,
6144                        "%s: Failed reading power descriptor.len = %d ret = %d",
6145                        __func__, buff_len, ret);
6146                goto out;
6147        }
6148
6149        hba->init_prefetch_data.icc_level =
6150                        ufshcd_find_max_sup_active_icc_level(hba,
6151                        desc_buf, buff_len);
6152        dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6153                        __func__, hba->init_prefetch_data.icc_level);
6154
6155        ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6156                QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6157                &hba->init_prefetch_data.icc_level);
6158
6159        if (ret)
6160                dev_err(hba->dev,
6161                        "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6162                        __func__, hba->init_prefetch_data.icc_level, ret);
6163
6164out:
6165        kfree(desc_buf);
6166}
6167
6168/**
6169 * ufshcd_scsi_add_wlus - Adds required W-LUs
6170 * @hba: per-adapter instance
6171 *
6172 * UFS device specification requires the UFS devices to support 4 well known
6173 * logical units:
6174 *      "REPORT_LUNS" (address: 01h)
6175 *      "UFS Device" (address: 50h)
6176 *      "RPMB" (address: 44h)
6177 *      "BOOT" (address: 30h)
6178 * The UFS device's power management needs to be controlled by the "POWER
6179 * CONDITION" field of the SSU (START STOP UNIT) command. This field takes
6180 * effect only when it is sent to the "UFS device" well known logical unit,
6181 * hence we require a scsi_device instance to represent this logical unit so
6182 * that the UFS host driver can send the SSU command for power management.
6183 *
6184 * We also require the scsi_device instance for the "RPMB" (Replay Protected
6185 * Memory Block) LU so that a user space process can control this LU. User
6186 * space may also want to have access to the BOOT LU.
6187 *
6188 * This function adds a scsi device instance for each of the well known LUs
6189 * (except "REPORT LUNS" LU).
6190 *
6191 * Returns zero on success (all required W-LUs are added successfully),
6192 * non-zero error value on failure (if failed to add any of the required W-LU).
6193 */
6194static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6195{
6196        int ret = 0;
6197        struct scsi_device *sdev_rpmb;
6198        struct scsi_device *sdev_boot;
6199
6200        hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6201                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6202        if (IS_ERR(hba->sdev_ufs_device)) {
6203                ret = PTR_ERR(hba->sdev_ufs_device);
6204                hba->sdev_ufs_device = NULL;
6205                goto out;
6206        }
6207        scsi_device_put(hba->sdev_ufs_device);
6208
6209        sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6210                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6211        if (IS_ERR(sdev_rpmb)) {
6212                ret = PTR_ERR(sdev_rpmb);
6213                goto remove_sdev_ufs_device;
6214        }
6215        scsi_device_put(sdev_rpmb);
6216
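            /*
             * Unlike the UFS device and RPMB W-LUs above, the BOOT W-LU is
             * treated as optional: a failure to add it is only logged and does
             * not fail this function.
             */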
6217        sdev_boot = __scsi_add_device(hba->host, 0, 0,
6218                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6219        if (IS_ERR(sdev_boot))
6220                dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6221        else
6222                scsi_device_put(sdev_boot);
6223        goto out;
6224
6225remove_sdev_ufs_device:
6226        scsi_remove_device(hba->sdev_ufs_device);
6227out:
6228        return ret;
6229}
6230
6231static int ufs_get_device_desc(struct ufs_hba *hba,
6232                               struct ufs_dev_desc *dev_desc)
6233{
6234        int err;
6235        size_t buff_len;
6236        u8 model_index;
6237        u8 *desc_buf;
6238
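            /*
             * The same buffer is used first for the device descriptor and then,
             * after being zeroed, for the product name string descriptor, so
             * size it for the larger of the two (plus one byte for the
             * terminating NUL).
             */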
6239        buff_len = max_t(size_t, hba->desc_size.dev_desc,
6240                         QUERY_DESC_MAX_SIZE + 1);
6241        desc_buf = kmalloc(buff_len, GFP_KERNEL);
6242        if (!desc_buf) {
6243                err = -ENOMEM;
6244                goto out;
6245        }
6246
6247        err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6248        if (err) {
6249                dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6250                        __func__, err);
6251                goto out;
6252        }
6253
6254        /*
6255         * get the vendor (wManufacturerID), stored in big endian format,
6256         * and the product name (model) string descriptor index
6257         */
6258        dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6259                                     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6260
6261        model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6262
6263        /* Zero-pad entire buffer for string termination. */
6264        memset(desc_buf, 0, buff_len);
6265
6266        err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6267                                      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
6268        if (err) {
6269                dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6270                        __func__, err);
6271                goto out;
6272        }
6273
6274        desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6275        strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6276                min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
6277                      MAX_MODEL_LEN));
6278
6279        /* Null terminate the model string */
6280        dev_desc->model[MAX_MODEL_LEN] = '\0';
6281
6282out:
6283        kfree(desc_buf);
6284        return err;
6285}
6286
6287static void ufs_fixup_device_setup(struct ufs_hba *hba,
6288                                   struct ufs_dev_desc *dev_desc)
6289{
6290        struct ufs_dev_fix *f;
6291
6292        for (f = ufs_fixups; f->quirk; f++) {
6293                if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6294                     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6295                    (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6296                     !strcmp(f->card.model, UFS_ANY_MODEL)))
6297                        hba->dev_quirks |= f->quirk;
6298        }
6299}
6300
6301/**
6302 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6303 * @hba: per-adapter instance
6304 *
6305 * The PA_TActivate parameter can be tuned manually if the UniPro version is
6306 * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
6307 * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can
6308 * help reduce the hibern8 exit latency.
6309 *
6310 * Returns zero on success, non-zero error value on failure.
6311 */
6312static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6313{
6314        int ret = 0;
6315        u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6316
6317        ret = ufshcd_dme_peer_get(hba,
6318                                  UIC_ARG_MIB_SEL(
6319                                        RX_MIN_ACTIVATETIME_CAPABILITY,
6320                                        UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6321                                  &peer_rx_min_activatetime);
6322        if (ret)
6323                goto out;
6324
6325        /* make sure proper unit conversion is applied */
6326        tuned_pa_tactivate =
6327                ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6328                 / PA_TACTIVATE_TIME_UNIT_US);
6329        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6330                             tuned_pa_tactivate);
6331
6332out:
6333        return ret;
6334}
6335
6336/**
6337 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6338 * @hba: per-adapter instance
6339 *
6340 * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
6341 * less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6342 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6343 * This optimal value can help reduce the hibern8 exit latency.
6344 *
6345 * Returns zero on success, non-zero error value on failure.
6346 */
6347static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6348{
6349        int ret = 0;
6350        u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6351        u32 max_hibern8_time, tuned_pa_hibern8time;
6352
6353        ret = ufshcd_dme_get(hba,
6354                             UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6355                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6356                                  &local_tx_hibern8_time_cap);
6357        if (ret)
6358                goto out;
6359
6360        ret = ufshcd_dme_peer_get(hba,
6361                                  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6362                                        UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6363                                  &peer_rx_hibern8_time_cap);
6364        if (ret)
6365                goto out;
6366
6367        max_hibern8_time = max(local_tx_hibern8_time_cap,
6368                               peer_rx_hibern8_time_cap);
6369        /* make sure proper unit conversion is applied */
6370        tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6371                                / PA_HIBERN8_TIME_UNIT_US);
6372        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6373                             tuned_pa_hibern8time);
6374out:
6375        return ret;
6376}
6377
6378/**
6379 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6380 * less than device PA_TACTIVATE time.
6381 * @hba: per-adapter instance
6382 *
6383 * Some UFS devices require host PA_TACTIVATE to be lower than device
6384 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
6385 * enabled for such devices.
6386 *
6387 * Returns zero on success, non-zero error value on failure.
6388 */
6389static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6390{
6391        int ret = 0;
6392        u32 granularity, peer_granularity;
6393        u32 pa_tactivate, peer_pa_tactivate;
6394        u32 pa_tactivate_us, peer_pa_tactivate_us;
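            /*
             * Each entry of this table gives the PA_TACTIVATE step size in
             * microseconds for one valid PA_GRANULARITY value; the granularity
             * is used as a 1-based index into it below.
             */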
6395        u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6396
6397        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6398                                  &granularity);
6399        if (ret)
6400                goto out;
6401
6402        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6403                                  &peer_granularity);
6404        if (ret)
6405                goto out;
6406
6407        if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6408            (granularity > PA_GRANULARITY_MAX_VAL)) {
6409                dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6410                        __func__, granularity);
6411                return -EINVAL;
6412        }
6413
6414        if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6415            (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6416                dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6417                        __func__, peer_granularity);
6418                return -EINVAL;
6419        }
6420
6421        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6422        if (ret)
6423                goto out;
6424
6425        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6426                                  &peer_pa_tactivate);
6427        if (ret)
6428                goto out;
6429
6430        pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6431        peer_pa_tactivate_us = peer_pa_tactivate *
6432                             gran_to_us_table[peer_granularity - 1];
6433
6434        if (pa_tactivate_us > peer_pa_tactivate_us) {
6435                u32 new_peer_pa_tactivate;
6436
6437                new_peer_pa_tactivate = pa_tactivate_us /
6438                                      gran_to_us_table[peer_granularity - 1];
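                    /*
                     * Round up so that the device-side PA_TACTIVATE, expressed
                     * in its own granularity, never ends up below the host-side
                     * value.
                     */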
6439                new_peer_pa_tactivate++;
6440                ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6441                                          new_peer_pa_tactivate);
6442        }
6443
6444out:
6445        return ret;
6446}
6447
6448static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6449{
6450        if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6451                ufshcd_tune_pa_tactivate(hba);
6452                ufshcd_tune_pa_hibern8time(hba);
6453        }
6454
6455        if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6456                /* set 1ms timeout for PA_TACTIVATE */
6457                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6458
6459        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6460                ufshcd_quirk_tune_host_pa_tactivate(hba);
6461
6462        ufshcd_vops_apply_dev_quirks(hba);
6463}
6464
6465static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6466{
6467        int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6468
6469        hba->ufs_stats.hibern8_exit_cnt = 0;
6470        hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6471
6472        memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6473        memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6474        memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6475        memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6476        memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6477
6478        hba->req_abort_count = 0;
6479}
6480
6481static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6482{
6483        int err;
6484
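            /*
             * Read each descriptor length from the device; if a query fails,
             * fall back to the default size defined for that descriptor.
             */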
6485        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6486                &hba->desc_size.dev_desc);
6487        if (err)
6488                hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6489
6490        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6491                &hba->desc_size.pwr_desc);
6492        if (err)
6493                hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6494
6495        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6496                &hba->desc_size.interc_desc);
6497        if (err)
6498                hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6499
6500        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6501                &hba->desc_size.conf_desc);
6502        if (err)
6503                hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6504
6505        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6506                &hba->desc_size.unit_desc);
6507        if (err)
6508                hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6509
6510        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6511                &hba->desc_size.geom_desc);
6512        if (err)
6513                hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6514        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6515                &hba->desc_size.hlth_desc);
6516        if (err)
6517                hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6518}
6519
6520static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6521{
6522        hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6523        hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6524        hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6525        hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6526        hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6527        hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6528        hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6529}
6530
6531/**
6532 * ufshcd_probe_hba - probe hba to detect device and initialize
6533 * @hba: per-adapter instance
6534 *
6535 * Execute link-startup and verify device initialization
6536 */
6537static int ufshcd_probe_hba(struct ufs_hba *hba)
6538{
6539        struct ufs_dev_desc card = {0};
6540        int ret;
6541        ktime_t start = ktime_get();
6542
6543        ret = ufshcd_link_startup(hba);
6544        if (ret)
6545                goto out;
6546
6547        /* set the default level for urgent bkops */
6548        hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6549        hba->is_urgent_bkops_lvl_checked = false;
6550
6551        /* Debug counters initialization */
6552        ufshcd_clear_dbg_ufs_stats(hba);
6553
6554        /* UniPro link is active now */
6555        ufshcd_set_link_active(hba);
6556
6557        /* Enable Auto-Hibernate if configured */
6558        ufshcd_auto_hibern8_enable(hba);
6559
6560        ret = ufshcd_verify_dev_init(hba);
6561        if (ret)
6562                goto out;
6563
6564        ret = ufshcd_complete_dev_init(hba);
6565        if (ret)
6566                goto out;
6567
6568        /* Init check for device descriptor sizes */
6569        ufshcd_init_desc_sizes(hba);
6570
6571        ret = ufs_get_device_desc(hba, &card);
6572        if (ret) {
6573                dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6574                        __func__, ret);
6575                goto out;
6576        }
6577
6578        ufs_fixup_device_setup(hba, &card);
6579        ufshcd_tune_unipro_params(hba);
6580
6581        ret = ufshcd_set_vccq_rail_unused(hba,
6582                (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6583        if (ret)
6584                goto out;
6585
6586        /* UFS device is also active now */
6587        ufshcd_set_ufs_dev_active(hba);
6588        ufshcd_force_reset_auto_bkops(hba);
6589        hba->wlun_dev_clr_ua = true;
6590
6591        if (ufshcd_get_max_pwr_mode(hba)) {
6592                dev_err(hba->dev,
6593                        "%s: Failed getting max supported power mode\n",
6594                        __func__);
6595        } else {
6596                ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6597                if (ret) {
6598                        dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6599                                        __func__, ret);
6600                        goto out;
6601                }
6602        }
6603
6604        /* set the state as operational after switching to desired gear */
6605        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6606
6607        /*
6608         * If we are in error handling context or in power management callbacks
6609         * context, no need to scan the host
6610         */
6611        if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6612                bool flag;
6613
6614                /* clear any previous UFS device information */
6615                memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6616                if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6617                                QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6618                        hba->dev_info.f_power_on_wp_en = flag;
6619
6620                if (!hba->is_init_prefetch)
6621                        ufshcd_init_icc_levels(hba);
6622
6623                /* Add required well known logical units to scsi mid layer */
6624                if (ufshcd_scsi_add_wlus(hba))
6625                        goto out;
6626
6627                /* Initialize devfreq after UFS device is detected */
6628                if (ufshcd_is_clkscaling_supported(hba)) {
6629                        memcpy(&hba->clk_scaling.saved_pwr_info.info,
6630                                &hba->pwr_info,
6631                                sizeof(struct ufs_pa_layer_attr));
6632                        hba->clk_scaling.saved_pwr_info.is_valid = true;
6633                        if (!hba->devfreq) {
6634                                ret = ufshcd_devfreq_init(hba);
6635                                if (ret)
6636                                        goto out;
6637                        }
6638                        hba->clk_scaling.is_allowed = true;
6639                }
6640
6641                scsi_scan_host(hba->host);
6642                pm_runtime_put_sync(hba->dev);
6643        }
6644
6645        if (!hba->is_init_prefetch)
6646                hba->is_init_prefetch = true;
6647
6648out:
6649        /*
6650         * If we failed to initialize the device or the device is not
6651         * present, turn off the power/clocks etc.
6652         */
6653        if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6654                pm_runtime_put_sync(hba->dev);
6655                ufshcd_hba_exit(hba);
6656        }
6657
6658        trace_ufshcd_init(dev_name(hba->dev), ret,
6659                ktime_to_us(ktime_sub(ktime_get(), start)),
6660                hba->curr_dev_pwr_mode, hba->uic_link_state);
6661        return ret;
6662}
6663
6664/**
6665 * ufshcd_async_scan - asynchronous execution for probing hba
6666 * @data: data pointer to pass to this function
6667 * @cookie: cookie data
6668 */
6669static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6670{
6671        struct ufs_hba *hba = (struct ufs_hba *)data;
6672
6673        ufshcd_probe_hba(hba);
6674}
6675
6676static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6677{
6678        unsigned long flags;
6679        struct Scsi_Host *host;
6680        struct ufs_hba *hba;
6681        int index;
6682        bool found = false;
6683
6684        if (!scmd || !scmd->device || !scmd->device->host)
6685                return BLK_EH_DONE;
6686
6687        host = scmd->device->host;
6688        hba = shost_priv(host);
6689        if (!hba)
6690                return BLK_EH_DONE;
6691
6692        spin_lock_irqsave(host->host_lock, flags);
6693
6694        for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6695                if (hba->lrb[index].cmd == scmd) {
6696                        found = true;
6697                        break;
6698                }
6699        }
6700
6701        spin_unlock_irqrestore(host->host_lock, flags);
6702
6703        /*
6704         * Bypass SCSI error handling and reset the block layer timer if this
6705         * SCSI command was not actually dispatched to UFS driver, otherwise
6706         * let SCSI layer handle the error as usual.
6707         */
6708        return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
6709}
6710
6711static const struct attribute_group *ufshcd_driver_groups[] = {
6712        &ufs_sysfs_unit_descriptor_group,
6713        &ufs_sysfs_lun_attributes_group,
6714        NULL,
6715};
6716
6717static struct scsi_host_template ufshcd_driver_template = {
6718        .module                 = THIS_MODULE,
6719        .name                   = UFSHCD,
6720        .proc_name              = UFSHCD,
6721        .queuecommand           = ufshcd_queuecommand,
6722        .slave_alloc            = ufshcd_slave_alloc,
6723        .slave_configure        = ufshcd_slave_configure,
6724        .slave_destroy          = ufshcd_slave_destroy,
6725        .change_queue_depth     = ufshcd_change_queue_depth,
6726        .eh_abort_handler       = ufshcd_abort,
6727        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
6728        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
6729        .eh_timed_out           = ufshcd_eh_timed_out,
6730        .this_id                = -1,
6731        .sg_tablesize           = SG_ALL,
6732        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
6733        .can_queue              = UFSHCD_CAN_QUEUE,
6734        .max_host_blocked       = 1,
6735        .track_queue_depth      = 1,
6736        .sdev_groups            = ufshcd_driver_groups,
6737};
6738
6739static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6740                                   int ua)
6741{
6742        int ret;
6743
6744        if (!vreg)
6745                return 0;
6746
6747        ret = regulator_set_load(vreg->reg, ua);
6748        if (ret < 0) {
6749                dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6750                                __func__, vreg->name, ua, ret);
6751        }
6752
6753        return ret;
6754}
6755
6756static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
6757                                         struct ufs_vreg *vreg)
6758{
6759        if (!vreg)
6760                return 0;
6761        else if (vreg->unused)
6762                return 0;
6763        else
6764                return ufshcd_config_vreg_load(hba->dev, vreg,
6765                                               UFS_VREG_LPM_LOAD_UA);
6766}
6767
6768static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
6769                                         struct ufs_vreg *vreg)
6770{
6771        if (!vreg)
6772                return 0;
6773        else if (vreg->unused)
6774                return 0;
6775        else
6776                return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
6777}
6778
6779static int ufshcd_config_vreg(struct device *dev,
6780                struct ufs_vreg *vreg, bool on)
6781{
6782        int ret = 0;
6783        struct regulator *reg;
6784        const char *name;
6785        int min_uV, uA_load;
6786
6787        BUG_ON(!vreg);
6788
6789        reg = vreg->reg;
6790        name = vreg->name;
6791
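            /*
             * When turning the regulator off, the voltage floor is relaxed to 0
             * and the requested load is dropped to 0; when turning it on, the
             * configured min/max voltage and the maximum load are applied.
             */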
6792        if (regulator_count_voltages(reg) > 0) {
6793                min_uV = on ? vreg->min_uV : 0;
6794                ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6795                if (ret) {
6796                        dev_err(dev, "%s: %s set voltage failed, err=%d\n",
6797                                        __func__, name, ret);
6798                        goto out;
6799                }
6800
6801                uA_load = on ? vreg->max_uA : 0;
6802                ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
6803                if (ret)
6804                        goto out;
6805        }
6806out:
6807        return ret;
6808}
6809
6810static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
6811{
6812        int ret = 0;
6813
6814        if (!vreg)
6815                goto out;
6816        else if (vreg->enabled || vreg->unused)
6817                goto out;
6818
6819        ret = ufshcd_config_vreg(dev, vreg, true);
6820        if (!ret)
6821                ret = regulator_enable(vreg->reg);
6822
6823        if (!ret)
6824                vreg->enabled = true;
6825        else
6826                dev_err(dev, "%s: %s enable failed, err=%d\n",
6827                                __func__, vreg->name, ret);
6828out:
6829        return ret;
6830}
6831
6832static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
6833{
6834        int ret = 0;
6835
6836        if (!vreg)
6837                goto out;
6838        else if (!vreg->enabled || vreg->unused)
6839                goto out;
6840
6841        ret = regulator_disable(vreg->reg);
6842
6843        if (!ret) {
6844                /* ignore errors on applying disable config */
6845                ufshcd_config_vreg(dev, vreg, false);
6846                vreg->enabled = false;
6847        } else {
6848                dev_err(dev, "%s: %s disable failed, err=%d\n",
6849                                __func__, vreg->name, ret);
6850        }
6851out:
6852        return ret;
6853}
6854
6855static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
6856{
6857        int ret = 0;
6858        struct device *dev = hba->dev;
6859        struct ufs_vreg_info *info = &hba->vreg_info;
6860
6861        if (!info)
6862                goto out;
6863
6864        ret = ufshcd_toggle_vreg(dev, info->vcc, on);
6865        if (ret)
6866                goto out;
6867
6868        ret = ufshcd_toggle_vreg(dev, info->vccq, on);
6869        if (ret)
6870                goto out;
6871
6872        ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
6873        if (ret)
6874                goto out;
6875
6876out:
6877        if (ret) {
6878                ufshcd_toggle_vreg(dev, info->vccq2, false);
6879                ufshcd_toggle_vreg(dev, info->vccq, false);
6880                ufshcd_toggle_vreg(dev, info->vcc, false);
6881        }
6882        return ret;
6883}
6884
6885static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
6886{
6887        struct ufs_vreg_info *info = &hba->vreg_info;
6888
6889        if (info)
6890                return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6891
6892        return 0;
6893}
6894
6895static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
6896{
6897        int ret = 0;
6898
6899        if (!vreg)
6900                goto out;
6901
6902        vreg->reg = devm_regulator_get(dev, vreg->name);
6903        if (IS_ERR(vreg->reg)) {
6904                ret = PTR_ERR(vreg->reg);
6905                dev_err(dev, "%s: %s get failed, err=%d\n",
6906                                __func__, vreg->name, ret);
6907        }
6908out:
6909        return ret;
6910}
6911
6912static int ufshcd_init_vreg(struct ufs_hba *hba)
6913{
6914        int ret = 0;
6915        struct device *dev = hba->dev;
6916        struct ufs_vreg_info *info = &hba->vreg_info;
6917
6918        if (!info)
6919                goto out;
6920
6921        ret = ufshcd_get_vreg(dev, info->vcc);
6922        if (ret)
6923                goto out;
6924
6925        ret = ufshcd_get_vreg(dev, info->vccq);
6926        if (ret)
6927                goto out;
6928
6929        ret = ufshcd_get_vreg(dev, info->vccq2);
6930out:
6931        return ret;
6932}
6933
6934static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
6935{
6936        struct ufs_vreg_info *info = &hba->vreg_info;
6937
6938        if (info)
6939                return ufshcd_get_vreg(hba->dev, info->vdd_hba);
6940
6941        return 0;
6942}
6943
6944static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
6945{
6946        int ret = 0;
6947        struct ufs_vreg_info *info = &hba->vreg_info;
6948
6949        if (!info)
6950                goto out;
6951        else if (!info->vccq)
6952                goto out;
6953
6954        if (unused) {
6955                /* shut off the rail here */
6956                ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
6957                /*
6958                 * Mark this rail as no longer used, so it doesn't get enabled
6959                 * later by mistake
6960                 */
6961                if (!ret)
6962                        info->vccq->unused = true;
6963        } else {
6964                /*
6965                 * the rail should already have been enabled, hence just make
6966                 * sure that the unused flag is cleared.
6967                 */
6968                info->vccq->unused = false;
6969        }
6970out:
6971        return ret;
6972}
6973
6974static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
6975                                        bool skip_ref_clk)
6976{
6977        int ret = 0;
6978        struct ufs_clk_info *clki;
6979        struct list_head *head = &hba->clk_list_head;
6980        unsigned long flags;
6981        ktime_t start = ktime_get();
6982        bool clk_state_changed = false;
6983
6984        if (list_empty(head))
6985                goto out;
6986
6987        /*
6988         * vendor specific setup_clocks ops may depend on clocks managed by
6989         * this standard driver hence call the vendor specific setup_clocks
6990         * before disabling the clocks managed here.
6991         */
6992        if (!on) {
6993                ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
6994                if (ret)
6995                        return ret;
6996        }
6997
6998        list_for_each_entry(clki, head, list) {
6999                if (!IS_ERR_OR_NULL(clki->clk)) {
7000                        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7001                                continue;
7002
7003                        clk_state_changed = on ^ clki->enabled;
7004                        if (on && !clki->enabled) {
7005                                ret = clk_prepare_enable(clki->clk);
7006                                if (ret) {
7007                                        dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7008                                                __func__, clki->name, ret);
7009                                        goto out;
7010                                }
7011                        } else if (!on && clki->enabled) {
7012                                clk_disable_unprepare(clki->clk);
7013                        }
7014                        clki->enabled = on;
7015                        dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7016                                        clki->name, on ? "en" : "dis");
7017                }
7018        }
7019
7020        /*
7021         * vendor specific setup_clocks ops may depend on clocks managed by
7022         * this standard driver hence call the vendor specific setup_clocks
7023         * after enabling the clocks managed here.
7024         */
7025        if (on) {
7026                ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7027                if (ret)
7028                        return ret;
7029        }
7030
7031out:
7032        if (ret) {
7033                list_for_each_entry(clki, head, list) {
7034                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7035                                clk_disable_unprepare(clki->clk);
7036                }
7037        } else if (!ret && on) {
7038                spin_lock_irqsave(hba->host->host_lock, flags);
7039                hba->clk_gating.state = CLKS_ON;
7040                trace_ufshcd_clk_gating(dev_name(hba->dev),
7041                                        hba->clk_gating.state);
7042                spin_unlock_irqrestore(hba->host->host_lock, flags);
7043        }
7044
7045        if (clk_state_changed)
7046                trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7047                        (on ? "on" : "off"),
7048                        ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7049        return ret;
7050}
7051
7052static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7053{
7054        return  __ufshcd_setup_clocks(hba, on, false);
7055}
7056
7057static int ufshcd_init_clocks(struct ufs_hba *hba)
7058{
7059        int ret = 0;
7060        struct ufs_clk_info *clki;
7061        struct device *dev = hba->dev;
7062        struct list_head *head = &hba->clk_list_head;
7063
7064        if (list_empty(head))
7065                goto out;
7066
7067        list_for_each_entry(clki, head, list) {
7068                if (!clki->name)
7069                        continue;
7070
7071                clki->clk = devm_clk_get(dev, clki->name);
7072                if (IS_ERR(clki->clk)) {
7073                        ret = PTR_ERR(clki->clk);
7074                        dev_err(dev, "%s: %s clk get failed, %d\n",
7075                                        __func__, clki->name, ret);
7076                        goto out;
7077                }
7078
7079                if (clki->max_freq) {
7080                        ret = clk_set_rate(clki->clk, clki->max_freq);
7081                        if (ret) {
7082                                dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7083                                        __func__, clki->name,
7084                                        clki->max_freq, ret);
7085                                goto out;
7086                        }
7087                        clki->curr_freq = clki->max_freq;
7088                }
7089                dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7090                                clki->name, clk_get_rate(clki->clk));
7091        }
7092out:
7093        return ret;
7094}
7095
7096static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7097{
7098        int err = 0;
7099
7100        if (!hba->vops)
7101                goto out;
7102
7103        err = ufshcd_vops_init(hba);
7104        if (err)
7105                goto out;
7106
7107        err = ufshcd_vops_setup_regulators(hba, true);
7108        if (err)
7109                goto out_exit;
7110
7111        goto out;
7112
7113out_exit:
7114        ufshcd_vops_exit(hba);
7115out:
7116        if (err)
7117                dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7118                        __func__, ufshcd_get_var_name(hba), err);
7119        return err;
7120}
7121
7122static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7123{
7124        if (!hba->vops)
7125                return;
7126
7127        ufshcd_vops_setup_regulators(hba, false);
7128
7129        ufshcd_vops_exit(hba);
7130}
7131
7132static int ufshcd_hba_init(struct ufs_hba *hba)
7133{
7134        int err;
7135
7136        /*
7137         * Handle host controller power separately from the UFS device power
7138         * rails, as this makes it easier to control host controller power
7139         * collapse, which is different from UFS device power collapse.
7140         * Also, enable the host controller power before going ahead with the
7141         * rest of the initialization here.
7142         */
7143        err = ufshcd_init_hba_vreg(hba);
7144        if (err)
7145                goto out;
7146
7147        err = ufshcd_setup_hba_vreg(hba, true);
7148        if (err)
7149                goto out;
7150
7151        err = ufshcd_init_clocks(hba);
7152        if (err)
7153                goto out_disable_hba_vreg;
7154
7155        err = ufshcd_setup_clocks(hba, true);
7156        if (err)
7157                goto out_disable_hba_vreg;
7158
7159        err = ufshcd_init_vreg(hba);
7160        if (err)
7161                goto out_disable_clks;
7162
7163        err = ufshcd_setup_vreg(hba, true);
7164        if (err)
7165                goto out_disable_clks;
7166
7167        err = ufshcd_variant_hba_init(hba);
7168        if (err)
7169                goto out_disable_vreg;
7170
7171        hba->is_powered = true;
7172        goto out;
7173
7174out_disable_vreg:
7175        ufshcd_setup_vreg(hba, false);
7176out_disable_clks:
7177        ufshcd_setup_clocks(hba, false);
7178out_disable_hba_vreg:
7179        ufshcd_setup_hba_vreg(hba, false);
7180out:
7181        return err;
7182}
7183
7184static void ufshcd_hba_exit(struct ufs_hba *hba)
7185{
7186        if (hba->is_powered) {
7187                ufshcd_variant_hba_exit(hba);
7188                ufshcd_setup_vreg(hba, false);
7189                ufshcd_suspend_clkscaling(hba);
7190                if (ufshcd_is_clkscaling_supported(hba)) {
7191                        if (hba->devfreq)
7192                                ufshcd_suspend_clkscaling(hba);
7193                        destroy_workqueue(hba->clk_scaling.workq);
7194                        ufshcd_devfreq_remove(hba);
7195                }
7196                ufshcd_setup_clocks(hba, false);
7197                ufshcd_setup_hba_vreg(hba, false);
7198                hba->is_powered = false;
7199        }
7200}
7201
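    /*
     * Send a REQUEST SENSE command to the given W-LU. This is used mainly to
     * clear a pending unit attention condition (e.g. after power on) before
     * other SCSI commands such as START STOP UNIT are issued to it.
     */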
7202static int
7203ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7204{
7205        unsigned char cmd[6] = {REQUEST_SENSE,
7206                                0,
7207                                0,
7208                                0,
7209                                UFSHCD_REQ_SENSE_SIZE,
7210                                0};
7211        char *buffer;
7212        int ret;
7213
7214        buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7215        if (!buffer) {
7216                ret = -ENOMEM;
7217                goto out;
7218        }
7219
7220        ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7221                        UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7222                        msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7223        if (ret)
7224                pr_err("%s: failed with err %d\n", __func__, ret);
7225
7226        kfree(buffer);
7227out:
7228        return ret;
7229}
7230
7231/**
7232 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7233 *                           power mode
7234 * @hba: per adapter instance
7235 * @pwr_mode: device power mode to set
7236 *
7237 * Returns 0 if requested power mode is set successfully
7238 * Returns non-zero if failed to set the requested power mode
7239 */
7240static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7241                                     enum ufs_dev_pwr_mode pwr_mode)
7242{
7243        unsigned char cmd[6] = { START_STOP };
7244        struct scsi_sense_hdr sshdr;
7245        struct scsi_device *sdp;
7246        unsigned long flags;
7247        int ret;
7248
7249        spin_lock_irqsave(hba->host->host_lock, flags);
7250        sdp = hba->sdev_ufs_device;
7251        if (sdp) {
7252                ret = scsi_device_get(sdp);
7253                if (!ret && !scsi_device_online(sdp)) {
7254                        ret = -ENODEV;
7255                        scsi_device_put(sdp);
7256                }
7257        } else {
7258                ret = -ENODEV;
7259        }
7260        spin_unlock_irqrestore(hba->host->host_lock, flags);
7261
7262        if (ret)
7263                return ret;
7264
7265        /*
7266         * If scsi commands fail, the scsi mid-layer schedules scsi error
7267         * handling, which would wait for the host to be resumed. Since we
7268         * know we are functional while we are here, skip host resume in the
7269         * error handling context.
7270         */
7271        hba->host->eh_noresume = 1;
7272        if (hba->wlun_dev_clr_ua) {
7273                ret = ufshcd_send_request_sense(hba, sdp);
7274                if (ret)
7275                        goto out;
7276                /* Unit attention condition is cleared now */
7277                hba->wlun_dev_clr_ua = false;
7278        }
7279
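            /*
             * The requested power mode goes into the POWER CONDITION field,
             * which occupies the upper nibble of byte 4 of the START STOP UNIT
             * CDB.
             */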
7280        cmd[4] = pwr_mode << 4;
7281
7282        /*
7283         * This function is generally called from the power management
7284         * callbacks, hence set the RQF_PM flag so that it doesn't resume
7285         * already suspended children.
7286         */
7287        ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7288                        START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7289        if (ret) {
7290                sdev_printk(KERN_WARNING, sdp,
7291                            "START_STOP failed for power mode: %d, result %x\n",
7292                            pwr_mode, ret);
7293                if (driver_byte(ret) == DRIVER_SENSE)
7294                        scsi_print_sense_hdr(sdp, NULL, &sshdr);
7295        }
7296
7297        if (!ret)
7298                hba->curr_dev_pwr_mode = pwr_mode;
7299out:
7300        scsi_device_put(sdp);
7301        hba->host->eh_noresume = 0;
7302        return ret;
7303}
7304
7305static int ufshcd_link_state_transition(struct ufs_hba *hba,
7306                                        enum uic_link_state req_link_state,
7307                                        int check_for_bkops)
7308{
7309        int ret = 0;
7310
7311        if (req_link_state == hba->uic_link_state)
7312                return 0;
7313
7314        if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7315                ret = ufshcd_uic_hibern8_enter(hba);
7316                if (!ret)
7317                        ufshcd_set_link_hibern8(hba);
7318                else
7319                        goto out;
7320        }
7321        /*
7322         * If autobkops is enabled, link can't be turned off because
7323         * turning off the link would also turn off the device.
7324         */
7325        else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7326                   (!check_for_bkops ||
7327                    !hba->auto_bkops_enabled)) {
7328                /*
7329                 * Make sure that the link is in low power mode; we currently do
7330                 * this by putting the link in Hibern8. Another way to put the
7331                 * link in low power mode is to send the DME end point reset to
7332                 * the device and then send the DME reset command to the local
7333                 * UniPro, but putting the link in Hibern8 is much faster.
7334                 */
7335                ret = ufshcd_uic_hibern8_enter(hba);
7336                if (ret)
7337                        goto out;
7338                /*
7339                 * Change controller state to "reset state" which
7340                 * should also put the link in off/reset state
7341                 */
7342                ufshcd_hba_stop(hba, true);
7343                /*
7344                 * TODO: Check if we need any delay to make sure that
7345                 * controller is reset
7346                 */
7347                ufshcd_set_link_off(hba);
7348        }
7349
7350out:
7351        return ret;
7352}
7353
7354static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7355{
7356        /*
7357         * It seems some UFS devices may keep drawing more than sleep current
7358         * (at least for 500us) from UFS rails (especially from the VCCQ rail).
7359         * To avoid this situation, add 2ms delay before putting these UFS
7360         * rails in LPM mode.
7361         */
7362        if (!ufshcd_is_link_active(hba) &&
7363            hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7364                usleep_range(2000, 2100);
7365
7366        /*
7367         * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7368         * save some power.
7369         *
7370         * If UFS device and link is in OFF state, all power supplies (VCC,
7371         * VCCQ, VCCQ2) can be turned off if power on write protect is not
7372         * required. If UFS link is inactive (Hibern8 or OFF state) and device
7373         * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
7374         *
7375         * Ignore the error returned by ufshcd_toggle_vreg() as the device is
7376         * anyway in a low power state, which saves some power.
7377         */
7378        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7379            !hba->dev_info.is_lu_power_on_wp) {
7380                ufshcd_setup_vreg(hba, false);
7381        } else if (!ufshcd_is_ufs_dev_active(hba)) {
7382                ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7383                if (!ufshcd_is_link_active(hba)) {
7384                        ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7385                        ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7386                }
7387        }
7388}
7389
7390static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7391{
7392        int ret = 0;
7393
7394        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7395            !hba->dev_info.is_lu_power_on_wp) {
7396                ret = ufshcd_setup_vreg(hba, true);
7397        } else if (!ufshcd_is_ufs_dev_active(hba)) {
7398                if (!ufshcd_is_link_active(hba)) {
7399                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7400                        if (ret)
7401                                goto vcc_disable;
7402                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7403                        if (ret)
7404                                goto vccq_lpm;
7405                }
7406                ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7407        }
7408        goto out;
7409
7410vccq_lpm:
7411        ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7412vcc_disable:
7413        ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7414out:
7415        return ret;
7416}
7417
7418static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7419{
7420        if (ufshcd_is_link_off(hba))
7421                ufshcd_setup_hba_vreg(hba, false);
7422}
7423
7424static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7425{
7426        if (ufshcd_is_link_off(hba))
7427                ufshcd_setup_hba_vreg(hba, true);
7428}
7429
7430/**
7431 * ufshcd_suspend - helper function for suspend operations
7432 * @hba: per adapter instance
7433 * @pm_op: desired low power operation type
7434 *
7435 * This function will try to put the UFS device and link into low power
7436 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7437 * (System PM level).
7438 *
7439 * If this function is called during shutdown, it will make sure that
7440 * both the UFS device and the UFS link are powered off.
7441 *
7442 * NOTE: UFS device & link must be active before we enter this function.
7443 *
7444 * Returns 0 for success and non-zero for failure
7445 */
7446static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7447{
7448        int ret = 0;
7449        enum ufs_pm_level pm_lvl;
7450        enum ufs_dev_pwr_mode req_dev_pwr_mode;
7451        enum uic_link_state req_link_state;
7452
7453        hba->pm_op_in_progress = 1;
7454        if (!ufshcd_is_shutdown_pm(pm_op)) {
7455                pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7456                         hba->rpm_lvl : hba->spm_lvl;
7457                req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7458                req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7459        } else {
7460                req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7461                req_link_state = UIC_LINK_OFF_STATE;
7462        }
7463
7464        /*
7465         * If we can't transition into any of the low power modes
7466         * just gate the clocks.
7467         */
7468        ufshcd_hold(hba, false);
7469        hba->clk_gating.is_suspended = true;
7470
7471        if (hba->clk_scaling.is_allowed) {
7472                cancel_work_sync(&hba->clk_scaling.suspend_work);
7473                cancel_work_sync(&hba->clk_scaling.resume_work);
7474                ufshcd_suspend_clkscaling(hba);
7475        }
7476
7477        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7478                        req_link_state == UIC_LINK_ACTIVE_STATE) {
7479                goto disable_clks;
7480        }
7481
7482        if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7483            (req_link_state == hba->uic_link_state))
7484                goto enable_gating;
7485
7486        /* UFS device & link must be active before we enter this function */
7487        if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7488                ret = -EINVAL;
7489                goto enable_gating;
7490        }
7491
7492        if (ufshcd_is_runtime_pm(pm_op)) {
7493                if (ufshcd_can_autobkops_during_suspend(hba)) {
7494                        /*
7495                         * The device is idle with no requests in the queue,
7496                         * allow background operations if bkops status shows
7497                         * that performance might be impacted.
7498                         */
7499                        ret = ufshcd_urgent_bkops(hba);
7500                        if (ret)
7501                                goto enable_gating;
7502                } else {
7503                        /* make sure that auto bkops is disabled */
7504                        ufshcd_disable_auto_bkops(hba);
7505                }
7506        }
7507
7508        if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7509             ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7510               !ufshcd_is_runtime_pm(pm_op))) {
7511                /* ensure that bkops is disabled */
7512                ufshcd_disable_auto_bkops(hba);
7513                ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7514                if (ret)
7515                        goto enable_gating;
7516        }
7517
7518        ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7519        if (ret)
7520                goto set_dev_active;
7521
7522        ufshcd_vreg_set_lpm(hba);
7523
7524disable_clks:
7525        /*
7526         * Call vendor specific suspend callback. As these callbacks may access
7527         * vendor specific host controller register space, call them while the
7528         * host clocks are still ON.
7529         */
7530        ret = ufshcd_vops_suspend(hba, pm_op);
7531        if (ret)
7532                goto set_link_active;
7533
7534        if (!ufshcd_is_link_active(hba))
7535                ufshcd_setup_clocks(hba, false);
7536        else
7537                /* If link is active, device ref_clk can't be switched off */
7538                __ufshcd_setup_clocks(hba, false, true);
7539
7540        hba->clk_gating.state = CLKS_OFF;
7541        trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7542        /*
7543         * Disable the host irq as there won't be any host controller
7544         * transactions expected till resume.
7545         */
7546        ufshcd_disable_irq(hba);
7547        /* Put the host controller in low power mode if possible */
7548        ufshcd_hba_vreg_set_lpm(hba);
7549        goto out;
7550
7551set_link_active:
7552        if (hba->clk_scaling.is_allowed)
7553                ufshcd_resume_clkscaling(hba);
7554        ufshcd_vreg_set_hpm(hba);
7555        if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7556                ufshcd_set_link_active(hba);
7557        else if (ufshcd_is_link_off(hba))
7558                ufshcd_host_reset_and_restore(hba);
7559set_dev_active:
7560        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7561                ufshcd_disable_auto_bkops(hba);
7562enable_gating:
7563        if (hba->clk_scaling.is_allowed)
7564                ufshcd_resume_clkscaling(hba);
7565        hba->clk_gating.is_suspended = false;
7566        ufshcd_release(hba);
7567out:
7568        hba->pm_op_in_progress = 0;
7569        return ret;
7570}
7571
7572/**
7573 * ufshcd_resume - helper function for resume operations
7574 * @hba: per adapter instance
7575 * @pm_op: runtime PM or system PM
7576 *
7577 * This function basically brings the UFS device, UniPro link and controller
7578 * to active state.
7579 *
7580 * Returns 0 for success and non-zero for failure
7581 */
7582static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7583{
7584        int ret;
7585        enum uic_link_state old_link_state;
7586
7587        hba->pm_op_in_progress = 1;
7588        old_link_state = hba->uic_link_state;
7589
7590        ufshcd_hba_vreg_set_hpm(hba);
7591        /* Make sure clocks are enabled before accessing controller */
7592        ret = ufshcd_setup_clocks(hba, true);
7593        if (ret)
7594                goto out;
7595
7596        /* enable the host irq as host controller would be active soon */
7597        ret = ufshcd_enable_irq(hba);
7598        if (ret)
7599                goto disable_irq_and_vops_clks;
7600
7601        ret = ufshcd_vreg_set_hpm(hba);
7602        if (ret)
7603                goto disable_irq_and_vops_clks;
7604
7605        /*
7606         * Call vendor specific resume callback. As these callbacks may access
7607         * vendor specific host controller register space, call them when the
7608         * host clocks are ON.
7609         */
7610        ret = ufshcd_vops_resume(hba, pm_op);
7611        if (ret)
7612                goto disable_vreg;
7613
7614        if (ufshcd_is_link_hibern8(hba)) {
7615                ret = ufshcd_uic_hibern8_exit(hba);
7616                if (!ret)
7617                        ufshcd_set_link_active(hba);
7618                else
7619                        goto vendor_suspend;
7620        } else if (ufshcd_is_link_off(hba)) {
7621                ret = ufshcd_host_reset_and_restore(hba);
7622                /*
7623                 * ufshcd_host_reset_and_restore() should have already
7624                 * set the link state as active
7625                 */
7626                if (ret || !ufshcd_is_link_active(hba))
7627                        goto vendor_suspend;
7628        }
7629
7630        if (!ufshcd_is_ufs_dev_active(hba)) {
7631                ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7632                if (ret)
7633                        goto set_old_link_state;
7634        }
7635
7636        if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7637                ufshcd_enable_auto_bkops(hba);
7638        else
7639                /*
7640                 * If BKOPs operations are urgently needed at this moment then
7641                 * keep auto-bkops enabled or else disable it.
7642                 */
7643                ufshcd_urgent_bkops(hba);
7644
7645        hba->clk_gating.is_suspended = false;
7646
7647        if (hba->clk_scaling.is_allowed)
7648                ufshcd_resume_clkscaling(hba);
7649
7650        /* Schedule clock gating in case of no access to UFS device yet */
7651        ufshcd_release(hba);
7652
7653        /* Enable Auto-Hibernate if configured */
7654        ufshcd_auto_hibern8_enable(hba);
7655
7656        goto out;
7657
7658set_old_link_state:
7659        ufshcd_link_state_transition(hba, old_link_state, 0);
7660vendor_suspend:
7661        ufshcd_vops_suspend(hba, pm_op);
7662disable_vreg:
7663        ufshcd_vreg_set_lpm(hba);
7664disable_irq_and_vops_clks:
7665        ufshcd_disable_irq(hba);
7666        if (hba->clk_scaling.is_allowed)
7667                ufshcd_suspend_clkscaling(hba);
7668        ufshcd_setup_clocks(hba, false);
7669out:
7670        hba->pm_op_in_progress = 0;
7671        return ret;
7672}
7673
7674/**
7675 * ufshcd_system_suspend - system suspend routine
7676 * @hba: per adapter instance
7677 *
7678 * Check the description of ufshcd_suspend() function for more details.
7679 *
7680 * Returns 0 for success and non-zero for failure
7681 */
7682int ufshcd_system_suspend(struct ufs_hba *hba)
7683{
7684        int ret = 0;
7685        ktime_t start = ktime_get();
7686
7687        if (!hba || !hba->is_powered)
7688                return 0;
7689
7690        if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7691             hba->curr_dev_pwr_mode) &&
7692            (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7693             hba->uic_link_state))
7694                goto out;
7695
7696        if (pm_runtime_suspended(hba->dev)) {
7697                /*
7698                 * The UFS device and/or UFS link low power states during
7699                 * runtime suspend may differ from what is expected during
7700                 * system suspend. Hence runtime resume the device & link and
7701                 * let the system suspend low power states take effect.
7702                 * TODO: If resume takes a long time, we might optimize it in
7703                 * the future by not resuming everything if possible.
7704                 */
7705                ret = ufshcd_runtime_resume(hba);
7706                if (ret)
7707                        goto out;
7708        }
7709
7710        ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7711out:
7712        trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7713                ktime_to_us(ktime_sub(ktime_get(), start)),
7714                hba->curr_dev_pwr_mode, hba->uic_link_state);
7715        if (!ret)
7716                hba->is_sys_suspended = true;
7717        return ret;
7718}
7719EXPORT_SYMBOL(ufshcd_system_suspend);
7720
7721/**
7722 * ufshcd_system_resume - system resume routine
7723 * @hba: per adapter instance
7724 *
7725 * Returns 0 for success and non-zero for failure
7726 */
7727
7728int ufshcd_system_resume(struct ufs_hba *hba)
7729{
7730        int ret = 0;
7731        ktime_t start = ktime_get();
7732
7733        if (!hba)
7734                return -EINVAL;
7735
7736        if (!hba->is_powered || pm_runtime_suspended(hba->dev))
7737                /*
7738                 * Let the runtime resume take care of resuming
7739                 * if runtime suspended.
7740                 */
7741                goto out;
7742        else
7743                ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7744out:
7745        trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7746                ktime_to_us(ktime_sub(ktime_get(), start)),
7747                hba->curr_dev_pwr_mode, hba->uic_link_state);
7748        return ret;
7749}
7750EXPORT_SYMBOL(ufshcd_system_resume);
7751
7752/**
7753 * ufshcd_runtime_suspend - runtime suspend routine
7754 * @hba: per adapter instance
7755 *
7756 * Check the description of ufshcd_suspend() function for more details.
7757 *
7758 * Returns 0 for success and non-zero for failure
7759 */
7760int ufshcd_runtime_suspend(struct ufs_hba *hba)
7761{
7762        int ret = 0;
7763        ktime_t start = ktime_get();
7764
7765        if (!hba)
7766                return -EINVAL;
7767
7768        if (!hba->is_powered)
7769                goto out;
7770        else
7771                ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7772out:
7773        trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7774                ktime_to_us(ktime_sub(ktime_get(), start)),
7775                hba->curr_dev_pwr_mode, hba->uic_link_state);
7776        return ret;
7777}
7778EXPORT_SYMBOL(ufshcd_runtime_suspend);
7779
7780/**
7781 * ufshcd_runtime_resume - runtime resume routine
7782 * @hba: per adapter instance
7783 *
7784 * This function basically brings the UFS device, UniPro link and controller
7785 * to active state. Following operations are done in this function:
7786 *
7787 * 1. Turn on all the controller related clocks
7788 * 2. Bring the UniPro link out of Hibernate state
7789 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
7790 *    to active state.
7791 * 4. If auto-bkops is enabled on the device, disable it.
7792 *
7793 * So the following would be the possible power state after this function
7794 * returns successfully:
7795 *      S1: UFS device in Active state with VCC rail ON
7796 *          UniPro link in Active state
7797 *          All the UFS/UniPro controller clocks are ON
7798 *
7799 * Returns 0 for success and non-zero for failure
7800 */
7801int ufshcd_runtime_resume(struct ufs_hba *hba)
7802{
7803        int ret = 0;
7804        ktime_t start = ktime_get();
7805
7806        if (!hba)
7807                return -EINVAL;
7808
7809        if (!hba->is_powered)
7810                goto out;
7811        else
7812                ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7813out:
7814        trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7815                ktime_to_us(ktime_sub(ktime_get(), start)),
7816                hba->curr_dev_pwr_mode, hba->uic_link_state);
7817        return ret;
7818}
7819EXPORT_SYMBOL(ufshcd_runtime_resume);
7820
7821int ufshcd_runtime_idle(struct ufs_hba *hba)
7822{
7823        return 0;
7824}
7825EXPORT_SYMBOL(ufshcd_runtime_idle);
7826
7827/**
7828 * ufshcd_shutdown - shutdown routine
7829 * @hba: per adapter instance
7830 *
7831 * This function would power off both UFS device and UFS link.
7832 *
7833 * Returns 0 always to allow force shutdown even in case of errors.
7834 */
7835int ufshcd_shutdown(struct ufs_hba *hba)
7836{
7837        int ret = 0;
7838
7839        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
7840                goto out;
7841
7842        if (pm_runtime_suspended(hba->dev)) {
7843                ret = ufshcd_runtime_resume(hba);
7844                if (ret)
7845                        goto out;
7846        }
7847
7848        ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
7849out:
7850        if (ret)
7851                dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
7852        /* allow force shutdown even in case of errors */
7853        return 0;
7854}
7855EXPORT_SYMBOL(ufshcd_shutdown);
7856
7857/**
7858 * ufshcd_remove - de-allocate SCSI host and host memory space
7859 *              data structure memory
7860 * @hba: per adapter instance
7861 */
7862void ufshcd_remove(struct ufs_hba *hba)
7863{
7864        ufs_sysfs_remove_nodes(hba->dev);
7865        scsi_remove_host(hba->host);
7866        /* disable interrupts */
7867        ufshcd_disable_intr(hba, hba->intr_mask);
7868        ufshcd_hba_stop(hba, true);
7869
7870        ufshcd_exit_clk_gating(hba);
7871        if (ufshcd_is_clkscaling_supported(hba))
7872                device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
7873        ufshcd_hba_exit(hba);
7874}
7875EXPORT_SYMBOL_GPL(ufshcd_remove);
7876
7877/**
7878 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
7879 * @hba: pointer to Host Bus Adapter (HBA)
7880 */
7881void ufshcd_dealloc_host(struct ufs_hba *hba)
7882{
7883        scsi_host_put(hba->host);
7884}
7885EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
7886
7887/**
7888 * ufshcd_set_dma_mask - Set dma mask based on the controller
7889 *                       addressing capability
7890 * @hba: per adapter instance
7891 *
7892 * Returns 0 for success, non-zero for failure
7893 */
7894static int ufshcd_set_dma_mask(struct ufs_hba *hba)
7895{
7896        if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
7897                if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
7898                        return 0;
7899        }
7900        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
7901}
7902
7903/**
7904 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
7905 * @dev: pointer to device handle
7906 * @hba_handle: driver private handle
7907 * Returns 0 on success, non-zero value on failure
7908 */
7909int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7910{
7911        struct Scsi_Host *host;
7912        struct ufs_hba *hba;
7913        int err = 0;
7914
7915        if (!dev) {
7916                dev_err(dev,
7917                "Invalid memory reference for dev is NULL\n");
7918                err = -ENODEV;
7919                goto out_error;
7920        }
7921
7922        host = scsi_host_alloc(&ufshcd_driver_template,
7923                                sizeof(struct ufs_hba));
7924        if (!host) {
7925                dev_err(dev, "scsi_host_alloc failed\n");
7926                err = -ENOMEM;
7927                goto out_error;
7928        }
7929
7930        hba = shost_priv(host);
7931        hba->host = host;
7932        hba->dev = dev;
7933        *hba_handle = hba;
7934
7935        INIT_LIST_HEAD(&hba->clk_list_head);
7936
7937out_error:
7938        return err;
7939}
7940EXPORT_SYMBOL(ufshcd_alloc_host);
7941
7942/**
7943 * ufshcd_init - Driver initialization routine
7944 * @hba: per-adapter instance
7945 * @mmio_base: base register address
7946 * @irq: Interrupt line of device
7947 * Returns 0 on success, non-zero value on failure
7948 */
7949int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7950{
7951        int err;
7952        struct Scsi_Host *host = hba->host;
7953        struct device *dev = hba->dev;
7954
7955        if (!mmio_base) {
7956                dev_err(hba->dev,
7957                "Invalid memory reference for mmio_base is NULL\n");
7958                err = -ENODEV;
7959                goto out_error;
7960        }
7961
7962        hba->mmio_base = mmio_base;
7963        hba->irq = irq;
7964
7965        /* Set descriptor lengths to specification defaults */
7966        ufshcd_def_desc_sizes(hba);
7967
7968        err = ufshcd_hba_init(hba);
7969        if (err)
7970                goto out_error;
7971
7972        /* Read capabilities registers */
7973        ufshcd_hba_capabilities(hba);
7974
7975        /* Get UFS version supported by the controller */
7976        hba->ufs_version = ufshcd_get_ufs_version(hba);
7977
7978        if ((hba->ufs_version != UFSHCI_VERSION_10) &&
7979            (hba->ufs_version != UFSHCI_VERSION_11) &&
7980            (hba->ufs_version != UFSHCI_VERSION_20) &&
7981            (hba->ufs_version != UFSHCI_VERSION_21))
7982                dev_err(hba->dev, "invalid UFS version 0x%x\n",
7983                        hba->ufs_version);
7984
7985        /* Get Interrupt bit mask per version */
7986        hba->intr_mask = ufshcd_get_intr_mask(hba);
7987
7988        err = ufshcd_set_dma_mask(hba);
7989        if (err) {
7990                dev_err(hba->dev, "set dma mask failed\n");
7991                goto out_disable;
7992        }
7993
7994        /* Allocate memory for host memory space */
7995        err = ufshcd_memory_alloc(hba);
7996        if (err) {
7997                dev_err(hba->dev, "Memory allocation failed\n");
7998                goto out_disable;
7999        }
8000
8001        /* Configure LRB */
8002        ufshcd_host_memory_configure(hba);
8003
8004        host->can_queue = hba->nutrs;
8005        host->cmd_per_lun = hba->nutrs;
8006        host->max_id = UFSHCD_MAX_ID;
8007        host->max_lun = UFS_MAX_LUNS;
8008        host->max_channel = UFSHCD_MAX_CHANNEL;
8009        host->unique_id = host->host_no;
8010        host->max_cmd_len = MAX_CDB_SIZE;
8011
8012        hba->max_pwr_info.is_valid = false;
8013
8014        /* Initialize wait queue for task management */
8015        init_waitqueue_head(&hba->tm_wq);
8016        init_waitqueue_head(&hba->tm_tag_wq);
8017
8018        /* Initialize work queues */
8019        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8020        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8021
8022        /* Initialize UIC command mutex */
8023        mutex_init(&hba->uic_cmd_mutex);
8024
8025        /* Initialize mutex for device management commands */
8026        mutex_init(&hba->dev_cmd.lock);
8027
8028        init_rwsem(&hba->clk_scaling_lock);
8029
8030        /* Initialize device management tag acquire wait queue */
8031        init_waitqueue_head(&hba->dev_cmd.tag_wq);
8032
8033        ufshcd_init_clk_gating(hba);
8034
8035        /*
8036         * In order to avoid any spurious interrupt immediately after
8037         * registering UFS controller interrupt handler, clear any pending UFS
8038         * interrupt status and disable all the UFS interrupts.
8039         */
8040        ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8041                      REG_INTERRUPT_STATUS);
8042        ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8043        /*
8044         * Make sure that UFS interrupts are disabled and any pending interrupt
8045         * status is cleared before registering UFS interrupt handler.
8046         */
8047        mb();
8048
8049        /* IRQ registration */
8050        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8051        if (err) {
8052                dev_err(hba->dev, "request irq failed\n");
8053                goto exit_gating;
8054        } else {
8055                hba->is_irq_enabled = true;
8056        }
8057
8058        err = scsi_add_host(host, hba->dev);
8059        if (err) {
8060                dev_err(hba->dev, "scsi_add_host failed\n");
8061                goto exit_gating;
8062        }
8063
8064        /* Host controller enable */
8065        err = ufshcd_hba_enable(hba);
8066        if (err) {
8067                dev_err(hba->dev, "Host controller enable failed\n");
8068                ufshcd_print_host_regs(hba);
8069                ufshcd_print_host_state(hba);
8070                goto out_remove_scsi_host;
8071        }
8072
8073        if (ufshcd_is_clkscaling_supported(hba)) {
8074                char wq_name[sizeof("ufs_clkscaling_00")];
8075
8076                INIT_WORK(&hba->clk_scaling.suspend_work,
8077                          ufshcd_clk_scaling_suspend_work);
8078                INIT_WORK(&hba->clk_scaling.resume_work,
8079                          ufshcd_clk_scaling_resume_work);
8080
8081                snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
8082                         host->host_no);
8083                hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
8084
8085                ufshcd_clkscaling_init_sysfs(hba);
8086        }
8087
8088        /*
8089         * Set the default power management level for runtime and system PM.
8090         * Default power saving mode is to keep UFS link in Hibern8 state
8091         * and UFS device in sleep state.
8092         */
8093        hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8094                                                UFS_SLEEP_PWR_MODE,
8095                                                UIC_LINK_HIBERN8_STATE);
8096        hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8097                                                UFS_SLEEP_PWR_MODE,
8098                                                UIC_LINK_HIBERN8_STATE);
8099
8100        /* Set the default auto-hibernate idle timer value to 150 ms */
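            /*
             * A scale field value of 3 selects 1 ms timer granularity, so the
             * timer value of 150 below corresponds to the 150 ms default.
             */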
8101        if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
8102                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8103                            FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8104        }
8105
8106        /* Hold auto suspend until async scan completes */
8107        pm_runtime_get_sync(dev);
8108        atomic_set(&hba->scsi_block_reqs_cnt, 0);
8109        /*
8110         * We are assuming that device wasn't put in sleep/power-down
8111         * state exclusively during the boot stage before kernel.
8112         * This assumption helps avoid doing link startup twice during
8113         * ufshcd_probe_hba().
8114         */
8115        ufshcd_set_ufs_dev_active(hba);
8116
8117        async_schedule(ufshcd_async_scan, hba);
8118        ufs_sysfs_add_nodes(hba->dev);
8119
8120        return 0;
8121
8122out_remove_scsi_host:
8123        scsi_remove_host(hba->host);
8124exit_gating:
8125        ufshcd_exit_clk_gating(hba);
8126out_disable:
8127        hba->is_irq_enabled = false;
8128        ufshcd_hba_exit(hba);
8129out_error:
8130        return err;
8131}
8132EXPORT_SYMBOL_GPL(ufshcd_init);
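
/*
 * Illustrative sketch only (not part of this driver): a bus glue driver is
 * expected to pair ufshcd_alloc_host() and ufshcd_init() roughly as below.
 * In-tree platform glue normally gets this via ufshcd_pltfrm_init(); the
 * name my_ufs_probe and the abbreviated error handling are hypothetical.
 *
 *	static int my_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		struct ufs_hba *hba;
 *		void __iomem *mmio_base;
 *		int irq, err;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		mmio_base = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(mmio_base))
 *			return PTR_ERR(mmio_base);
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		err = ufshcd_alloc_host(&pdev->dev, &hba);
 *		if (err)
 *			return err;
 *
 *		err = ufshcd_init(hba, mmio_base, irq);
 *		if (err)
 *			ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 */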
8133
8134MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
8135MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8136MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8137MODULE_LICENSE("GPL");
8138MODULE_VERSION(UFSHCD_DRIVER_VERSION);
8139