uboot/drivers/ufs/ufs.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0+
   2/**
   3 * ufs.c - Universal Flash Subsystem (UFS) driver
   4 *
   5 * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
   6 * to u-boot.
   7 *
   8 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
   9 */
  10
  11#include <charset.h>
  12#include <common.h>
  13#include <dm.h>
  14#include <dm/lists.h>
  15#include <dm/device-internal.h>
  16#include <malloc.h>
  17#include <hexdump.h>
  18#include <scsi.h>
  19
  20#include <asm/dma-mapping.h>
  21
  22#include "ufs.h"
  23
  24#define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
  25                                 UTP_TASK_REQ_COMPL |\
  26                                 UFSHCD_ERROR_MASK)
  27/* maximum number of link-startup retries */
  28#define DME_LINKSTARTUP_RETRIES 3
  29
  30/* maximum number of retries for a general UIC command  */
  31#define UFS_UIC_COMMAND_RETRIES 3
  32
  33/* Query request retries */
  34#define QUERY_REQ_RETRIES 3
  35/* Query request timeout */
  36#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
  37
  38/* maximum timeout in ms for a general UIC command */
  39#define UFS_UIC_CMD_TIMEOUT     1000
  40/* NOP OUT retries waiting for NOP IN response */
  41#define NOP_OUT_RETRIES    10
  42/* Timeout after 30 msecs if NOP OUT hangs without response */
  43#define NOP_OUT_TIMEOUT    30 /* msecs */
  44
  45/* Only use one Task Tag for all requests */
  46#define TASK_TAG        0
  47
  48/* Expose the flag value from utp_upiu_query.value */
  49#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
  50
  51#define MAX_PRDT_ENTRY  262144
  52
  53/* maximum bytes per request */
  54#define UFS_MAX_BYTES   (128 * 256 * 1024)
  55
  56static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
  57static inline void ufshcd_hba_stop(struct ufs_hba *hba);
  58static int ufshcd_hba_enable(struct ufs_hba *hba);
  59
  60/*
  61 * ufshcd_wait_for_register - wait for register value to change
  62 */
  63static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
  64                                    u32 val, unsigned long timeout_ms)
  65{
  66        int err = 0;
  67        unsigned long start = get_timer(0);
  68
  69        /* ignore bits that we don't intend to wait on */
  70        val = val & mask;
  71
  72        while ((ufshcd_readl(hba, reg) & mask) != val) {
  73                if (get_timer(start) > timeout_ms) {
  74                        if ((ufshcd_readl(hba, reg) & mask) != val)
  75                                err = -ETIMEDOUT;
  76                        break;
  77                }
  78        }
  79
  80        return err;
  81}
  82
  83/**
  84 * ufshcd_init_pwr_info - setting the POR (power on reset)
  85 * values in hba power info
  86 */
  87static void ufshcd_init_pwr_info(struct ufs_hba *hba)
  88{
  89        hba->pwr_info.gear_rx = UFS_PWM_G1;
  90        hba->pwr_info.gear_tx = UFS_PWM_G1;
  91        hba->pwr_info.lane_rx = 1;
  92        hba->pwr_info.lane_tx = 1;
  93        hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
  94        hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
  95        hba->pwr_info.hs_rate = 0;
  96}
  97
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 *
 * Logs gears, lane counts, power modes and HS rate for both directions.
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	/* indexed by the pwr_rx/pwr_tx mode values (FAST=1 ... SLOWAUTO=5) */
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
 121
 122/**
 123 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 124 *                            to accept UIC commands
 125 */
 126static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 127{
 128        if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
 129                return true;
 130        else
 131                return false;
 132}
 133
 134/**
 135 * ufshcd_get_uic_cmd_result - Get the UIC command result
 136 */
 137static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 138{
 139        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
 140               MASK_UIC_COMMAND_RESULT;
 141}
 142
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: per-adapter instance
 *
 * Return: raw contents of UIC command argument register 3 (the attribute
 * value a DME_GET/DME_PEER_GET placed there).
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
 150
 151/**
 152 * ufshcd_is_device_present - Check if any device connected to
 153 *                            the host controller
 154 */
 155static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
 156{
 157        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
 158                                                DEVICE_PRESENT) ? true : false;
 159}
 160
 161/**
 162 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 163 *
 164 */
 165static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 166{
 167        unsigned long start = 0;
 168        u32 intr_status;
 169        u32 enabled_intr_status;
 170
 171        if (!ufshcd_ready_for_uic_cmd(hba)) {
 172                dev_err(hba->dev,
 173                        "Controller not ready to accept UIC commands\n");
 174                return -EIO;
 175        }
 176
 177        debug("sending uic command:%d\n", uic_cmd->command);
 178
 179        /* Write Args */
 180        ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
 181        ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
 182        ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
 183
 184        /* Write UIC Cmd */
 185        ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
 186                      REG_UIC_COMMAND);
 187
 188        start = get_timer(0);
 189        do {
 190                intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 191                enabled_intr_status = intr_status & hba->intr_mask;
 192                ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 193
 194                if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
 195                        dev_err(hba->dev,
 196                                "Timedout waiting for UIC response\n");
 197
 198                        return -ETIMEDOUT;
 199                }
 200
 201                if (enabled_intr_status & UFSHCD_ERROR_MASK) {
 202                        dev_err(hba->dev, "Error in status:%08x\n",
 203                                enabled_intr_status);
 204
 205                        return -1;
 206                }
 207        } while (!(enabled_intr_status & UFSHCD_UIC_MASK));
 208
 209        uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
 210        uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);
 211
 212        debug("Sent successfully\n");
 213
 214        return 0;
 215}
 216
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per-adapter instance
 * @attr_sel: attribute selector (UIC_ARG_MIB()/UIC_ARG_MIB_SEL() encoded)
 * @attr_set: attribute set type, placed in ARG2 via UIC_ARG_ATTR_TYPE()
 * @mib_val: value to write to the attribute
 * @peer: non-zero issues DME_PEER_SET (device side), zero DME_SET (local)
 *
 * Peer accesses are retried up to UFS_UIC_COMMAND_RETRIES times on
 * failure; local accesses are attempted only once.
 *
 * Return: 0 on success, the last error from ufshcd_send_uic_cmd()
 * otherwise.
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
			u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
 254
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per-adapter instance
 * @attr_sel: attribute selector (UIC_ARG_MIB()/UIC_ARG_MIB_SEL() encoded)
 * @mib_val: out parameter for the attribute value; may be NULL, and is
 * only written on success
 * @peer: non-zero issues DME_PEER_GET (device side), zero DME_GET (local)
 *
 * Peer accesses are retried up to UFS_UIC_COMMAND_RETRIES times on
 * failure; local accesses are attempted only once.
 *
 * Return: 0 on success, the last error from ufshcd_send_uic_cmd()
 * otherwise.
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	/* argument3 holds the attribute value after a successful GET */
	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	return ret;
}
 293
 294static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
 295{
 296        u32 tx_lanes, i, err = 0;
 297
 298        if (!peer)
 299                ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
 300                               &tx_lanes);
 301        else
 302                ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
 303                                    &tx_lanes);
 304        for (i = 0; i < tx_lanes; i++) {
 305                if (!peer)
 306                        err = ufshcd_dme_set(hba,
 307                                             UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
 308                                             UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
 309                                             0);
 310                else
 311                        err = ufshcd_dme_peer_set(hba,
 312                                        UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
 313                                        UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
 314                                        0);
 315                if (err) {
 316                        dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
 317                                __func__, peer, i, err);
 318                        break;
 319                }
 320        }
 321
 322        return err;
 323}
 324
/* Disable line-control on the device (peer) side TX lanes. */
static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
 329
 330/**
 331 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 332 *
 333 */
 334static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 335{
 336        struct uic_command uic_cmd = {0};
 337        int ret;
 338
 339        uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
 340
 341        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 342        if (ret)
 343                dev_dbg(hba->dev,
 344                        "dme-link-startup: error code %d\n", ret);
 345        return ret;
 346}
 347
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per-adapter instance
 *
 * Writing 0 to the aggregation control register turns aggregation off,
 * so each request completion is reported individually.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
 356
 357/**
 358 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 359 */
 360static inline int ufshcd_get_lists_status(u32 reg)
 361{
 362        return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
 363}
 364
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per-adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	/* allow the task management request list to run */
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	/* allow the transfer request list to run */
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
 377
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per-adapter instance
 * @intrs: interrupt bits to enable in REG_INTERRUPT_ENABLE
 *
 * The value actually written is cached in hba->intr_mask so the polling
 * loops can filter the interrupt status register against it.
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	u32 rw;

	if (hba->version == UFSHCI_VERSION_10) {
		/*
		 * v1.0 controllers: keep only the RW-capable bits of the
		 * current value and add the requested bits not already set
		 * ((set ^ intrs) & intrs == intrs & ~set).
		 */
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

	hba->intr_mask = set;
}
 397
 398/**
 399 * ufshcd_make_hba_operational - Make UFS controller operational
 400 *
 401 * To bring UFS host controller to operational state,
 402 * 1. Enable required interrupts
 403 * 2. Configure interrupt aggregation
 404 * 3. Program UTRL and UTMRL base address
 405 * 4. Configure run-stop-registers
 406 *
 407 */
 408static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 409{
 410        int err = 0;
 411        u32 reg;
 412
 413        /* Enable required interrupts */
 414        ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 415
 416        /* Disable interrupt aggregation */
 417        ufshcd_disable_intr_aggr(hba);
 418
 419        /* Configure UTRL and UTMRL base address registers */
 420        ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
 421                      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
 422        ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
 423                      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
 424        ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
 425                      REG_UTP_TASK_REQ_LIST_BASE_L);
 426        ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
 427                      REG_UTP_TASK_REQ_LIST_BASE_H);
 428
 429        /*
 430         * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 431         */
 432        reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 433        if (!(ufshcd_get_lists_status(reg))) {
 434                ufshcd_enable_run_stop_reg(hba);
 435        } else {
 436                dev_err(hba->dev,
 437                        "Host controller not ready to process requests");
 438                err = -EIO;
 439                goto out;
 440        }
 441
 442out:
 443        return err;
 444}
 445
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per-adapter instance
 *
 * Runs the DME link-startup handshake (with vendor pre/post hooks),
 * retrying up to DME_LINKSTARTUP_RETRIES times and re-enabling the
 * host controller after a failed attempt.  The full sequence is then
 * run a second time (link_startup_again) before the link parameters
 * are recorded and the controller is made operational.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		/* vendor hook: pre link-startup configuration */
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	if (link_startup_again) {
		/* repeat the whole sequence once more */
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}
 508
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per-adapter instance
 *
 * Clears the HCE bit and waits up to 10 ms for the controller to read
 * back as disabled; failure is only logged.
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
 523
 524/**
 525 * ufshcd_is_hba_active - Get controller state
 526 */
 527static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
 528{
 529        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
 530                ? false : true;
 531}
 532
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per-adapter instance
 *
 * Sets the HCE bit; completion of the sequence is polled separately in
 * ufshcd_hba_enable().
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
 540
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per-adapter instance
 *
 * Resets the controller if necessary, sets HCE and then polls (10
 * retries, 5 ms apart, after an initial 1 ms delay) until the HCE bit
 * reads back as set.
 *
 * Return: 0 on success, -EIO if the controller never becomes active.
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	/* vendor hook before the enable sequence starts */
	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	/* vendor hook after the controller is up */
	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
 588
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per-adapter instance
 *
 * Points the single transfer request descriptor at the command
 * descriptor and caches CPU-side pointers to the request UPIU,
 * response UPIU and PRD table inside it.  Offsets and lengths are
 * programmed in dword units, hence the '>> 2' shifts.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	/* 64-bit address of the command descriptor, split into two dwords */
	utrdlp->command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	/* cached CPU views into the command descriptor */
	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}
 621
 622/**
 623 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 624 */
 625static int ufshcd_memory_alloc(struct ufs_hba *hba)
 626{
 627        /* Allocate one Transfer Request Descriptor
 628         * Should be aligned to 1k boundary.
 629         */
 630        hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
 631        if (!hba->utrdl) {
 632                dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
 633                return -ENOMEM;
 634        }
 635
 636        /* Allocate one Command Descriptor
 637         * Should be aligned to 1k boundary.
 638         */
 639        hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
 640        if (!hba->ucdl) {
 641                dev_err(hba->dev, "Command descriptor memory allocation failed\n");
 642                return -ENOMEM;
 643        }
 644
 645        return 0;
 646}
 647
 648/**
 649 * ufshcd_get_intr_mask - Get the interrupt bit mask
 650 */
 651static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 652{
 653        u32 intr_mask = 0;
 654
 655        switch (hba->version) {
 656        case UFSHCI_VERSION_10:
 657                intr_mask = INTERRUPT_MASK_ALL_VER_10;
 658                break;
 659        case UFSHCI_VERSION_11:
 660        case UFSHCI_VERSION_20:
 661                intr_mask = INTERRUPT_MASK_ALL_VER_11;
 662                break;
 663        case UFSHCI_VERSION_21:
 664        default:
 665                intr_mask = INTERRUPT_MASK_ALL_VER_21;
 666                break;
 667        }
 668
 669        return intr_mask;
 670}
 671
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: per-adapter instance
 *
 * Return: raw contents of the host controller version register.
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}
 679
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: per-adapter instance
 *
 * Return: the 3-bit UPMCRS field (bits 10:8 of the controller status
 * register).
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
 687
 688/**
 689 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 690 * descriptor according to request
 691 */
 692static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
 693                                        u32 *upiu_flags,
 694                                        enum dma_data_direction cmd_dir)
 695{
 696        u32 data_direction;
 697        u32 dword_0;
 698
 699        if (cmd_dir == DMA_FROM_DEVICE) {
 700                data_direction = UTP_DEVICE_TO_HOST;
 701                *upiu_flags = UPIU_CMD_FLAGS_READ;
 702        } else if (cmd_dir == DMA_TO_DEVICE) {
 703                data_direction = UTP_HOST_TO_DEVICE;
 704                *upiu_flags = UPIU_CMD_FLAGS_WRITE;
 705        } else {
 706                data_direction = UTP_NO_DATA_TRANSFER;
 707                *upiu_flags = UPIU_CMD_FLAGS_NONE;
 708        }
 709
 710        dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);
 711
 712        /* Enable Interrupt for command */
 713        dword_0 |= UTP_REQ_DESC_INT_CMD;
 714
 715        /* Transfer request descriptor header fields */
 716        req_desc->header.dword_0 = cpu_to_le32(dword_0);
 717        /* dword_1 is reserved, hence it is set to 0 */
 718        req_desc->header.dword_1 = 0;
 719        /*
 720         * assigning invalid value for command status. Controller
 721         * updates OCS on command completion, with the command
 722         * status
 723         */
 724        req_desc->header.dword_2 =
 725                cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
 726        /* dword_3 is reserved, hence it is set to 0 */
 727        req_desc->header.dword_3 = 0;
 728
 729        req_desc->prd_table_length = 0;
 730}
 731
/*
 * ufshcd_prepare_utp_query_req_upiu - build a Query Request UPIU in the
 * command descriptor from hba->dev_cmd.query, and clear the response area.
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
				UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
						  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
				UPIU_HEADER_DWORD(0, query->request.query_func,
						  0, 0);

	/* Data segment length only need for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
				UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor - the payload follows the request UPIU */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	/* pre-clear the response area for this request */
	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
 763
/*
 * ufshcd_prepare_utp_nop_upiu - build a NOP OUT UPIU in the command
 * descriptor and clear the response area.
 */
static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
			UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
 779
 780/**
 781 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 782 *                           for Device Management Purposes
 783 */
 784static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
 785                                   enum dev_cmd_type cmd_type)
 786{
 787        u32 upiu_flags;
 788        int ret = 0;
 789        struct utp_transfer_req_desc *req_desc = hba->utrdl;
 790
 791        hba->dev_cmd.type = cmd_type;
 792
 793        ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
 794        switch (cmd_type) {
 795        case DEV_CMD_TYPE_QUERY:
 796                ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
 797                break;
 798        case DEV_CMD_TYPE_NOP:
 799                ufshcd_prepare_utp_nop_upiu(hba);
 800                break;
 801        default:
 802                ret = -EINVAL;
 803        }
 804
 805        return ret;
 806}
 807
 808static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 809{
 810        unsigned long start;
 811        u32 intr_status;
 812        u32 enabled_intr_status;
 813
 814        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 815
 816        start = get_timer(0);
 817        do {
 818                intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 819                enabled_intr_status = intr_status & hba->intr_mask;
 820                ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 821
 822                if (get_timer(start) > QUERY_REQ_TIMEOUT) {
 823                        dev_err(hba->dev,
 824                                "Timedout waiting for UTP response\n");
 825
 826                        return -ETIMEDOUT;
 827                }
 828
 829                if (enabled_intr_status & UFSHCD_ERROR_MASK) {
 830                        dev_err(hba->dev, "Error in status:%08x\n",
 831                                enabled_intr_status);
 832
 833                        return -1;
 834                }
 835        } while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));
 836
 837        return 0;
 838}
 839
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: response UPIU
 *
 * Return: the transaction code from the top byte of the response header
 * (e.g. UPIU_TRANSACTION_NOP_IN, UPIU_TRANSACTION_QUERY_RSP).
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
 847
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @hba: per-adapter instance
 *
 * Return: the OCS field from dword_2 of the transfer request descriptor
 * header; callers treat any non-zero value as a failed command.
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}
 856
/* Extract the result field (masked dword_1) from a response UPIU header. */
static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
 861
/*
 * ufshcd_check_query_response - extract the query response code from the
 * response UPIU and cache it in hba->dev_cmd.query.response.
 * Returns that code; callers treat non-zero as failure.
 */
static int ufshcd_check_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
 871
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per-adapter instance
 *
 * Copies the query response OSF area into hba->dev_cmd.query.response
 * and, for READ_DESC requests with a caller-supplied buffer, the
 * descriptor payload that follows the response UPIU.
 *
 * Return: 0 on success, -EINVAL when the device returned more
 * descriptor data than the caller's buffer can hold.
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		/* descriptor payload follows the response UPIU */
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}
 907
 908/**
 909 * ufshcd_exec_dev_cmd - API for sending device management requests
 910 */
 911static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
 912                               int timeout)
 913{
 914        int err;
 915        int resp;
 916
 917        err = ufshcd_comp_devman_upiu(hba, cmd_type);
 918        if (err)
 919                return err;
 920
 921        err = ufshcd_send_command(hba, TASK_TAG);
 922        if (err)
 923                return err;
 924
 925        err = ufshcd_get_tr_ocs(hba);
 926        if (err) {
 927                dev_err(hba->dev, "Error in OCS:%d\n", err);
 928                return -EINVAL;
 929        }
 930
 931        resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
 932        switch (resp) {
 933        case UPIU_TRANSACTION_NOP_IN:
 934                break;
 935        case UPIU_TRANSACTION_QUERY_RSP:
 936                err = ufshcd_check_query_response(hba);
 937                if (!err)
 938                        err = ufshcd_copy_query_response(hba);
 939                break;
 940        case UPIU_TRANSACTION_REJECT_UPIU:
 941                /* TODO: handle Reject UPIU Response */
 942                err = -EPERM;
 943                dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
 944                        __func__);
 945                break;
 946        default:
 947                err = -EINVAL;
 948                dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
 949                        __func__, resp);
 950        }
 951
 952        return err;
 953}
 954
 955/**
 956 * ufshcd_init_query() - init the query response and request parameters
 957 */
 958static inline void ufshcd_init_query(struct ufs_hba *hba,
 959                                     struct ufs_query_req **request,
 960                                     struct ufs_query_res **response,
 961                                     enum query_opcode opcode,
 962                                     u8 idn, u8 index, u8 selector)
 963{
 964        *request = &hba->dev_cmd.query.request;
 965        *response = &hba->dev_cmd.query.response;
 966        memset(*request, 0, sizeof(struct ufs_query_req));
 967        memset(*response, 0, sizeof(struct ufs_query_res));
 968        (*request)->upiu_req.opcode = opcode;
 969        (*request)->upiu_req.idn = idn;
 970        (*request)->upiu_req.index = index;
 971        (*request)->upiu_req.selector = selector;
 972}
 973
 974/**
 975 * ufshcd_query_flag() - API function for sending flag query requests
 976 */
 977int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 978                      enum flag_idn idn, bool *flag_res)
 979{
 980        struct ufs_query_req *request = NULL;
 981        struct ufs_query_res *response = NULL;
 982        int err, index = 0, selector = 0;
 983        int timeout = QUERY_REQ_TIMEOUT;
 984
 985        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 986                          selector);
 987
 988        switch (opcode) {
 989        case UPIU_QUERY_OPCODE_SET_FLAG:
 990        case UPIU_QUERY_OPCODE_CLEAR_FLAG:
 991        case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
 992                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
 993                break;
 994        case UPIU_QUERY_OPCODE_READ_FLAG:
 995                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
 996                if (!flag_res) {
 997                        /* No dummy reads */
 998                        dev_err(hba->dev, "%s: Invalid argument for read request\n",
 999                                __func__);
1000                        err = -EINVAL;
1001                        goto out;
1002                }
1003                break;
1004        default:
1005                dev_err(hba->dev,
1006                        "%s: Expected query flag opcode but got = %d\n",
1007                        __func__, opcode);
1008                err = -EINVAL;
1009                goto out;
1010        }
1011
1012        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
1013
1014        if (err) {
1015                dev_err(hba->dev,
1016                        "%s: Sending flag query for idn %d failed, err = %d\n",
1017                        __func__, idn, err);
1018                goto out;
1019        }
1020
1021        if (flag_res)
1022                *flag_res = (be32_to_cpu(response->upiu_res.value) &
1023                                MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1024
1025out:
1026        return err;
1027}
1028
1029static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1030                                   enum query_opcode opcode,
1031                                   enum flag_idn idn, bool *flag_res)
1032{
1033        int ret;
1034        int retries;
1035
1036        for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1037                ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1038                if (ret)
1039                        dev_dbg(hba->dev,
1040                                "%s: failed with error %d, retries %d\n",
1041                                __func__, ret, retries);
1042                else
1043                        break;
1044        }
1045
1046        if (ret)
1047                dev_err(hba->dev,
1048                        "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
1049                        __func__, opcode, idn, ret, retries);
1050        return ret;
1051}
1052
1053static int __ufshcd_query_descriptor(struct ufs_hba *hba,
1054                                     enum query_opcode opcode,
1055                                     enum desc_idn idn, u8 index, u8 selector,
1056                                     u8 *desc_buf, int *buf_len)
1057{
1058        struct ufs_query_req *request = NULL;
1059        struct ufs_query_res *response = NULL;
1060        int err;
1061
1062        if (!desc_buf) {
1063                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1064                        __func__, opcode);
1065                err = -EINVAL;
1066                goto out;
1067        }
1068
1069        if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1070                dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1071                        __func__, *buf_len);
1072                err = -EINVAL;
1073                goto out;
1074        }
1075
1076        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1077                          selector);
1078        hba->dev_cmd.query.descriptor = desc_buf;
1079        request->upiu_req.length = cpu_to_be16(*buf_len);
1080
1081        switch (opcode) {
1082        case UPIU_QUERY_OPCODE_WRITE_DESC:
1083                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1084                break;
1085        case UPIU_QUERY_OPCODE_READ_DESC:
1086                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1087                break;
1088        default:
1089                dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1090                        __func__, opcode);
1091                err = -EINVAL;
1092                goto out;
1093        }
1094
1095        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1096
1097        if (err) {
1098                dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
1099                        __func__, opcode, idn, index, err);
1100                goto out;
1101        }
1102
1103        hba->dev_cmd.query.descriptor = NULL;
1104        *buf_len = be16_to_cpu(response->upiu_res.length);
1105
1106out:
1107        return err;
1108}
1109
1110/**
1111 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
1112 */
1113int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
1114                                  enum desc_idn idn, u8 index, u8 selector,
1115                                  u8 *desc_buf, int *buf_len)
1116{
1117        int err;
1118        int retries;
1119
1120        for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1121                err = __ufshcd_query_descriptor(hba, opcode, idn, index,
1122                                                selector, desc_buf, buf_len);
1123                if (!err || err == -EINVAL)
1124                        break;
1125        }
1126
1127        return err;
1128}
1129
1130/**
1131 * ufshcd_read_desc_length - read the specified descriptor length from header
1132 */
1133static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
1134                                   int desc_index, int *desc_length)
1135{
1136        int ret;
1137        u8 header[QUERY_DESC_HDR_SIZE];
1138        int header_len = QUERY_DESC_HDR_SIZE;
1139
1140        if (desc_id >= QUERY_DESC_IDN_MAX)
1141                return -EINVAL;
1142
1143        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1144                                            desc_id, desc_index, 0, header,
1145                                            &header_len);
1146
1147        if (ret) {
1148                dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
1149                        __func__, desc_id);
1150                return ret;
1151        } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
1152                dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
1153                         __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
1154                         desc_id);
1155                ret = -EINVAL;
1156        }
1157
1158        *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
1159
1160        return ret;
1161}
1162
1163static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
1164{
1165        int err;
1166
1167        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
1168                                      &hba->desc_size.dev_desc);
1169        if (err)
1170                hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1171
1172        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
1173                                      &hba->desc_size.pwr_desc);
1174        if (err)
1175                hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1176
1177        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
1178                                      &hba->desc_size.interc_desc);
1179        if (err)
1180                hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1181
1182        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
1183                                      &hba->desc_size.conf_desc);
1184        if (err)
1185                hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1186
1187        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
1188                                      &hba->desc_size.unit_desc);
1189        if (err)
1190                hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1191
1192        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
1193                                      &hba->desc_size.geom_desc);
1194        if (err)
1195                hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1196
1197        err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
1198                                      &hba->desc_size.hlth_desc);
1199        if (err)
1200                hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1201}
1202
1203/**
1204 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
1205 *
1206 */
1207int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
1208                                 int *desc_len)
1209{
1210        switch (desc_id) {
1211        case QUERY_DESC_IDN_DEVICE:
1212                *desc_len = hba->desc_size.dev_desc;
1213                break;
1214        case QUERY_DESC_IDN_POWER:
1215                *desc_len = hba->desc_size.pwr_desc;
1216                break;
1217        case QUERY_DESC_IDN_GEOMETRY:
1218                *desc_len = hba->desc_size.geom_desc;
1219                break;
1220        case QUERY_DESC_IDN_CONFIGURATION:
1221                *desc_len = hba->desc_size.conf_desc;
1222                break;
1223        case QUERY_DESC_IDN_UNIT:
1224                *desc_len = hba->desc_size.unit_desc;
1225                break;
1226        case QUERY_DESC_IDN_INTERCONNECT:
1227                *desc_len = hba->desc_size.interc_desc;
1228                break;
1229        case QUERY_DESC_IDN_STRING:
1230                *desc_len = QUERY_DESC_MAX_SIZE;
1231                break;
1232        case QUERY_DESC_IDN_HEALTH:
1233                *desc_len = hba->desc_size.hlth_desc;
1234                break;
1235        case QUERY_DESC_IDN_RFU_0:
1236        case QUERY_DESC_IDN_RFU_1:
1237                *desc_len = 0;
1238                break;
1239        default:
1240                *desc_len = 0;
1241                return -EINVAL;
1242        }
1243        return 0;
1244}
1245EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
1246
1247/**
1248 * ufshcd_read_desc_param - read the specified descriptor parameter
1249 *
1250 */
1251int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
1252                           int desc_index, u8 param_offset, u8 *param_read_buf,
1253                           u8 param_size)
1254{
1255        int ret;
1256        u8 *desc_buf;
1257        int buff_len;
1258        bool is_kmalloc = true;
1259
1260        /* Safety check */
1261        if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
1262                return -EINVAL;
1263
1264        /* Get the max length of descriptor from structure filled up at probe
1265         * time.
1266         */
1267        ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
1268
1269        /* Sanity checks */
1270        if (ret || !buff_len) {
1271                dev_err(hba->dev, "%s: Failed to get full descriptor length",
1272                        __func__);
1273                return ret;
1274        }
1275
1276        /* Check whether we need temp memory */
1277        if (param_offset != 0 || param_size < buff_len) {
1278                desc_buf = kmalloc(buff_len, GFP_KERNEL);
1279                if (!desc_buf)
1280                        return -ENOMEM;
1281        } else {
1282                desc_buf = param_read_buf;
1283                is_kmalloc = false;
1284        }
1285
1286        /* Request for full descriptor */
1287        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1288                                            desc_id, desc_index, 0, desc_buf,
1289                                            &buff_len);
1290
1291        if (ret) {
1292                dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
1293                        __func__, desc_id, desc_index, param_offset, ret);
1294                goto out;
1295        }
1296
1297        /* Sanity check */
1298        if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
1299                dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
1300                        __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
1301                ret = -EINVAL;
1302                goto out;
1303        }
1304
1305        /* Check wherher we will not copy more data, than available */
1306        if (is_kmalloc && param_size > buff_len)
1307                param_size = buff_len;
1308
1309        if (is_kmalloc)
1310                memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1311out:
1312        if (is_kmalloc)
1313                kfree(desc_buf);
1314        return ret;
1315}
1316
1317/* replace non-printable or non-ASCII characters with spaces */
1318static inline void ufshcd_remove_non_printable(uint8_t *val)
1319{
1320        if (!val)
1321                return;
1322
1323        if (*val < 0x20 || *val > 0x7e)
1324                *val = ' ';
1325}
1326
1327/**
1328 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
1329 * state) and waits for it to take effect.
1330 *
1331 */
1332static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
1333{
1334        unsigned long start = 0;
1335        u8 status;
1336        int ret;
1337
1338        ret = ufshcd_send_uic_cmd(hba, cmd);
1339        if (ret) {
1340                dev_err(hba->dev,
1341                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
1342                        cmd->command, cmd->argument3, ret);
1343
1344                return ret;
1345        }
1346
1347        start = get_timer(0);
1348        do {
1349                status = ufshcd_get_upmcrs(hba);
1350                if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
1351                        dev_err(hba->dev,
1352                                "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
1353                                cmd->command, status);
1354                        ret = (status != PWR_OK) ? status : -1;
1355                        break;
1356                }
1357        } while (status != PWR_LOCAL);
1358
1359        return ret;
1360}
1361
1362/**
1363 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1364 *                              using DME_SET primitives.
1365 */
1366static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1367{
1368        struct uic_command uic_cmd = {0};
1369        int ret;
1370
1371        uic_cmd.command = UIC_CMD_DME_SET;
1372        uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
1373        uic_cmd.argument3 = mode;
1374        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
1375
1376        return ret;
1377}
1378
1379static
1380void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
1381                                      struct scsi_cmd *pccb, u32 upiu_flags)
1382{
1383        struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
1384        unsigned int cdb_len;
1385
1386        /* command descriptor fields */
1387        ucd_req_ptr->header.dword_0 =
1388                        UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
1389                                          pccb->lun, TASK_TAG);
1390        ucd_req_ptr->header.dword_1 =
1391                        UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1392
1393        /* Total EHS length and Data segment length will be zero */
1394        ucd_req_ptr->header.dword_2 = 0;
1395
1396        ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);
1397
1398        cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
1399        memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
1400        memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
1401
1402        memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
1403}
1404
/*
 * prepare_prdt_desc - fill one PRDT scatter/gather entry
 * @entry: PRDT slot to fill
 * @buf:   DMA-able data buffer address
 * @len:   value for the size field (callers pass byte count minus one)
 *
 * NOTE(review): the low two bits are OR'd in after cpu_to_le32(), which
 * only lands on the intended bits on a little-endian host — confirm if
 * big-endian support is ever needed.
 */
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
1412
/*
 * prepare_prdt_table - build the PRDT for a SCSI command's data buffer
 * @hba:  controller instance
 * @pccb: SCSI command; pccb->pdata / pccb->datalen describe the buffer
 *
 * Splits the transfer into entries of at most MAX_PRDT_ENTRY bytes each.
 * Entry lengths are encoded as (bytes - 1), hence the "- 1" on both the
 * full-sized entries and the final partial one.
 */
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	/* Zero-length transfer: no PRDT entries at all */
	if (!datalen) {
		req_desc->prd_table_length = 0;
		return;
	}

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	/*
	 * Fill entries 0 .. table_length-2 with full MAX_PRDT_ENTRY chunks;
	 * the loop runs table_length-1 times (i counts down to 1).
	 */
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	/* i == 0 here: the last entry carries whatever bytes remain */
	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
}
1441
1442static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
1443{
1444        struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
1445        struct utp_transfer_req_desc *req_desc = hba->utrdl;
1446        u32 upiu_flags;
1447        int ocs, result = 0;
1448        u8 scsi_status;
1449
1450        ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
1451        ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
1452        prepare_prdt_table(hba, pccb);
1453
1454        ufshcd_send_command(hba, TASK_TAG);
1455
1456        ocs = ufshcd_get_tr_ocs(hba);
1457        switch (ocs) {
1458        case OCS_SUCCESS:
1459                result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
1460                switch (result) {
1461                case UPIU_TRANSACTION_RESPONSE:
1462                        result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);
1463
1464                        scsi_status = result & MASK_SCSI_STATUS;
1465                        if (scsi_status)
1466                                return -EINVAL;
1467
1468                        break;
1469                case UPIU_TRANSACTION_REJECT_UPIU:
1470                        /* TODO: handle Reject UPIU Response */
1471                        dev_err(hba->dev,
1472                                "Reject UPIU not fully implemented\n");
1473                        return -EINVAL;
1474                default:
1475                        dev_err(hba->dev,
1476                                "Unexpected request response code = %x\n",
1477                                result);
1478                        return -EINVAL;
1479                }
1480                break;
1481        default:
1482                dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
1483                return -EINVAL;
1484        }
1485
1486        return 0;
1487}
1488
/* Convenience wrapper: read a whole descriptor starting at offset 0 */
static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, u8 *buf, u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}
1494
/* Read the Device descriptor (index 0) into buf */
static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
1499
1500/**
1501 * ufshcd_read_string_desc - read string descriptor
1502 *
1503 */
1504int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
1505                            u8 *buf, u32 size, bool ascii)
1506{
1507        int err = 0;
1508
1509        err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
1510                               size);
1511
1512        if (err) {
1513                dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
1514                        __func__, QUERY_REQ_RETRIES, err);
1515                goto out;
1516        }
1517
1518        if (ascii) {
1519                int desc_len;
1520                int ascii_len;
1521                int i;
1522                u8 *buff_ascii;
1523
1524                desc_len = buf[0];
1525                /* remove header and divide by 2 to move from UTF16 to UTF8 */
1526                ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
1527                if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
1528                        dev_err(hba->dev, "%s: buffer allocated size is too small\n",
1529                                __func__);
1530                        err = -ENOMEM;
1531                        goto out;
1532                }
1533
1534                buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
1535                if (!buff_ascii) {
1536                        err = -ENOMEM;
1537                        goto out;
1538                }
1539
1540                /*
1541                 * the descriptor contains string in UTF16 format
1542                 * we need to convert to utf-8 so it can be displayed
1543                 */
1544                utf16_to_utf8(buff_ascii,
1545                              (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);
1546
1547                /* replace non-printable or non-ASCII characters with spaces */
1548                for (i = 0; i < ascii_len; i++)
1549                        ufshcd_remove_non_printable(&buff_ascii[i]);
1550
1551                memset(buf + QUERY_DESC_HDR_SIZE, 0,
1552                       size - QUERY_DESC_HDR_SIZE);
1553                memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
1554                buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
1555                kfree(buff_ascii);
1556        }
1557out:
1558        return err;
1559}
1560
/*
 * ufs_get_device_desc - read manufacturer ID and product name from the device
 * @hba: host controller instance
 * @dev_desc: output; wmanufacturerid and model are filled in
 *
 * Reads the Device descriptor, then the product-name String descriptor
 * (converted to ASCII) referenced by it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	/* Big enough for either descriptor plus a NUL terminator slot */
	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/*
	 * Null terminate the model string.
	 * NOTE(review): writing index MAX_MODEL_LEN assumes model[] is
	 * declared MAX_MODEL_LEN + 1 bytes — confirm in ufs.h.
	 */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}
1616
1617/**
1618 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
1619 */
1620static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
1621{
1622        struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
1623
1624        if (hba->max_pwr_info.is_valid)
1625                return 0;
1626
1627        pwr_info->pwr_tx = FAST_MODE;
1628        pwr_info->pwr_rx = FAST_MODE;
1629        pwr_info->hs_rate = PA_HS_MODE_B;
1630
1631        /* Get the connected lane count */
1632        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
1633                       &pwr_info->lane_rx);
1634        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
1635                       &pwr_info->lane_tx);
1636
1637        if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
1638                dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
1639                        __func__, pwr_info->lane_rx, pwr_info->lane_tx);
1640                return -EINVAL;
1641        }
1642
1643        /*
1644         * First, get the maximum gears of HS speed.
1645         * If a zero value, it means there is no HSGEAR capability.
1646         * Then, get the maximum gears of PWM speed.
1647         */
1648        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
1649        if (!pwr_info->gear_rx) {
1650                ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1651                               &pwr_info->gear_rx);
1652                if (!pwr_info->gear_rx) {
1653                        dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
1654                                __func__, pwr_info->gear_rx);
1655                        return -EINVAL;
1656                }
1657                pwr_info->pwr_rx = SLOW_MODE;
1658        }
1659
1660        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
1661                            &pwr_info->gear_tx);
1662        if (!pwr_info->gear_tx) {
1663                ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1664                                    &pwr_info->gear_tx);
1665                if (!pwr_info->gear_tx) {
1666                        dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
1667                                __func__, pwr_info->gear_tx);
1668                        return -EINVAL;
1669                }
1670                pwr_info->pwr_tx = SLOW_MODE;
1671        }
1672
1673        hba->max_pwr_info.is_valid = true;
1674        return 0;
1675}
1676
/*
 * ufshcd_change_power_mode - program the link's gears/lanes/rate and switch
 * @hba: host controller instance
 * @pwr_mode: requested PA-layer attributes
 *
 * Sets the RX/TX gear, active lane counts, termination and (for HS modes)
 * the HS series via DME_SET, then triggers the actual mode change through
 * PA_PWRMODE. On success the new mode is cached in hba->pwr_info.
 *
 * Return: 0 on success (or if already in the requested mode), negative or
 * non-zero error from the power-mode change otherwise.
 */
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	/* Termination is required for HS (FAST/FASTAUTO) operation */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	/* HS series only matters if either direction runs an HS mode */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	/* PA_PWRMODE encodes RX mode in the high nibble, TX in the low */
	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);

		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}
1738
1739/**
1740 * ufshcd_verify_dev_init() - Verify device initialization
1741 *
1742 */
1743static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1744{
1745        int retries;
1746        int err;
1747
1748        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1749                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1750                                          NOP_OUT_TIMEOUT);
1751                if (!err || err == -ETIMEDOUT)
1752                        break;
1753
1754                dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1755        }
1756
1757        if (err)
1758                dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1759
1760        return err;
1761}
1762
1763/**
1764 * ufshcd_complete_dev_init() - checks device readiness
1765 */
1766static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1767{
1768        int i;
1769        int err;
1770        bool flag_res = 1;
1771
1772        err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1773                                      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1774        if (err) {
1775                dev_err(hba->dev,
1776                        "%s setting fDeviceInit flag failed with error %d\n",
1777                        __func__, err);
1778                goto out;
1779        }
1780
1781        /* poll for max. 1000 iterations for fDeviceInit flag to clear */
1782        for (i = 0; i < 1000 && !err && flag_res; i++)
1783                err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1784                                              QUERY_FLAG_IDN_FDEVICEINIT,
1785                                              &flag_res);
1786
1787        if (err)
1788                dev_err(hba->dev,
1789                        "%s reading fDeviceInit flag failed with error %d\n",
1790                        __func__, err);
1791        else if (flag_res)
1792                dev_err(hba->dev,
1793                        "%s fDeviceInit was not cleared by the device\n",
1794                        __func__);
1795
1796out:
1797        return err;
1798}
1799
1800static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
1801{
1802        hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1803        hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1804        hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1805        hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1806        hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1807        hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1808        hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1809}
1810
1811int ufs_start(struct ufs_hba *hba)
1812{
1813        struct ufs_dev_desc card = {0};
1814        int ret;
1815
1816        ret = ufshcd_link_startup(hba);
1817        if (ret)
1818                return ret;
1819
1820        ret = ufshcd_verify_dev_init(hba);
1821        if (ret)
1822                return ret;
1823
1824        ret = ufshcd_complete_dev_init(hba);
1825        if (ret)
1826                return ret;
1827
1828        /* Init check for device descriptor sizes */
1829        ufshcd_init_desc_sizes(hba);
1830
1831        ret = ufs_get_device_desc(hba, &card);
1832        if (ret) {
1833                dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
1834                        __func__, ret);
1835
1836                return ret;
1837        }
1838
1839        if (ufshcd_get_max_pwr_mode(hba)) {
1840                dev_err(hba->dev,
1841                        "%s: Failed getting max supported power mode\n",
1842                        __func__);
1843        } else {
1844                ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
1845                if (ret) {
1846                        dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
1847                                __func__, ret);
1848
1849                        return ret;
1850                }
1851
1852                printf("Device at %s up at:", hba->dev->name);
1853                ufshcd_print_pwr_info(hba);
1854        }
1855
1856        return 0;
1857}
1858
1859int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
1860{
1861        struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
1862        struct scsi_platdata *scsi_plat;
1863        struct udevice *scsi_dev;
1864        int err;
1865
1866        device_find_first_child(ufs_dev, &scsi_dev);
1867        if (!scsi_dev)
1868                return -ENODEV;
1869
1870        scsi_plat = dev_get_uclass_platdata(scsi_dev);
1871        scsi_plat->max_id = UFSHCD_MAX_ID;
1872        scsi_plat->max_lun = UFS_MAX_LUNS;
1873        scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;
1874
1875        hba->dev = ufs_dev;
1876        hba->ops = hba_ops;
1877        hba->mmio_base = (void *)dev_read_addr(ufs_dev);
1878
1879        /* Set descriptor lengths to specification defaults */
1880        ufshcd_def_desc_sizes(hba);
1881
1882        ufshcd_ops_init(hba);
1883
1884        /* Read capabilties registers */
1885        hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1886
1887        /* Get UFS version supported by the controller */
1888        hba->version = ufshcd_get_ufs_version(hba);
1889        if (hba->version != UFSHCI_VERSION_10 &&
1890            hba->version != UFSHCI_VERSION_11 &&
1891            hba->version != UFSHCI_VERSION_20 &&
1892            hba->version != UFSHCI_VERSION_21)
1893                dev_err(hba->dev, "invalid UFS version 0x%x\n",
1894                        hba->version);
1895
1896        /* Get Interrupt bit mask per version */
1897        hba->intr_mask = ufshcd_get_intr_mask(hba);
1898
1899        /* Allocate memory for host memory space */
1900        err = ufshcd_memory_alloc(hba);
1901        if (err) {
1902                dev_err(hba->dev, "Memory allocation failed\n");
1903                return err;
1904        }
1905
1906        /* Configure Local data structures */
1907        ufshcd_host_memory_configure(hba);
1908
1909        /*
1910         * In order to avoid any spurious interrupt immediately after
1911         * registering UFS controller interrupt handler, clear any pending UFS
1912         * interrupt status and disable all the UFS interrupts.
1913         */
1914        ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
1915                      REG_INTERRUPT_STATUS);
1916        ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
1917
1918        err = ufshcd_hba_enable(hba);
1919        if (err) {
1920                dev_err(hba->dev, "Host controller enable failed\n");
1921                return err;
1922        }
1923
1924        err = ufs_start(hba);
1925        if (err)
1926                return err;
1927
1928        return 0;
1929}
1930
/**
 * ufs_scsi_bind - bind a "ufs_scsi" child device under the UFS controller.
 * @ufs_dev: parent UFS controller udevice
 * @scsi_devp: returns the newly bound SCSI udevice
 *
 * Returns 0 on success, negative error code from device_bind_driver().
 */
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	return device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				  scsi_devp);
}
1938
/* SCSI uclass operations: commands are executed via ufs_scsi_exec. */
static struct scsi_ops ufs_ops = {
	.exec		= ufs_scsi_exec,
};
1942
1943int ufs_probe_dev(int index)
1944{
1945        struct udevice *dev;
1946
1947        return uclass_get_device(UCLASS_UFS, index, &dev);
1948}
1949
1950int ufs_probe(void)
1951{
1952        struct udevice *dev;
1953        int ret, i;
1954
1955        for (i = 0;; i++) {
1956                ret = uclass_get_device(UCLASS_UFS, i, &dev);
1957                if (ret == -ENODEV)
1958                        break;
1959        }
1960
1961        return 0;
1962}
1963
/* Driver-model declaration for the SCSI child bound in ufs_scsi_bind(). */
U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};
1969