linux/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (c) 2017 Hisilicon Limited.
   4 */
   5
   6#include "hisi_sas.h"
   7#define DRV_NAME "hisi_sas_v3_hw"
   8
   9/* global registers needing init */
  10#define DLVRY_QUEUE_ENABLE              0x0
  11#define IOST_BASE_ADDR_LO               0x8
  12#define IOST_BASE_ADDR_HI               0xc
  13#define ITCT_BASE_ADDR_LO               0x10
  14#define ITCT_BASE_ADDR_HI               0x14
  15#define IO_BROKEN_MSG_ADDR_LO           0x18
  16#define IO_BROKEN_MSG_ADDR_HI           0x1c
  17#define PHY_CONTEXT                     0x20
  18#define PHY_STATE                       0x24
  19#define PHY_PORT_NUM_MA                 0x28
  20#define PHY_CONN_RATE                   0x30
  21#define ITCT_CLR                        0x44
  22#define ITCT_CLR_EN_OFF                 16
  23#define ITCT_CLR_EN_MSK                 (0x1 << ITCT_CLR_EN_OFF)
  24#define ITCT_DEV_OFF                    0
  25#define ITCT_DEV_MSK                    (0x7ff << ITCT_DEV_OFF)
  26#define SAS_AXI_USER3                   0x50
  27#define IO_SATA_BROKEN_MSG_ADDR_LO      0x58
  28#define IO_SATA_BROKEN_MSG_ADDR_HI      0x5c
  29#define SATA_INITI_D2H_STORE_ADDR_LO    0x60
  30#define SATA_INITI_D2H_STORE_ADDR_HI    0x64
  31#define CFG_MAX_TAG                     0x68
  32#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
  33#define HGC_SAS_TXFAIL_RETRY_CTRL       0x88
  34#define HGC_GET_ITV_TIME                0x90
  35#define DEVICE_MSG_WORK_MODE            0x94
  36#define OPENA_WT_CONTI_TIME             0x9c
  37#define I_T_NEXUS_LOSS_TIME             0xa0
  38#define MAX_CON_TIME_LIMIT_TIME         0xa4
  39#define BUS_INACTIVE_LIMIT_TIME         0xa8
  40#define REJECT_TO_OPEN_LIMIT_TIME       0xac
  41#define CQ_INT_CONVERGE_EN              0xb0
  42#define CFG_AGING_TIME                  0xbc
  43#define HGC_DFX_CFG2                    0xc0
  44#define CFG_ABT_SET_QUERY_IPTT  0xd4
  45#define CFG_SET_ABORTED_IPTT_OFF        0
  46#define CFG_SET_ABORTED_IPTT_MSK        (0xfff << CFG_SET_ABORTED_IPTT_OFF)
  47#define CFG_SET_ABORTED_EN_OFF  12
  48#define CFG_ABT_SET_IPTT_DONE   0xd8
  49#define CFG_ABT_SET_IPTT_DONE_OFF       0
  50#define HGC_IOMB_PROC1_STATUS   0x104
  51#define HGC_LM_DFX_STATUS2              0x128
  52#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF         0
  53#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
  54                                         HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
  55#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF         12
  56#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
  57                                         HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
  58#define HGC_CQE_ECC_ADDR                0x13c
  59#define HGC_CQE_ECC_1B_ADDR_OFF 0
  60#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
  61#define HGC_CQE_ECC_MB_ADDR_OFF 8
  62#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
  63#define HGC_IOST_ECC_ADDR               0x140
  64#define HGC_IOST_ECC_1B_ADDR_OFF        0
  65#define HGC_IOST_ECC_1B_ADDR_MSK        (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
  66#define HGC_IOST_ECC_MB_ADDR_OFF        16
  67#define HGC_IOST_ECC_MB_ADDR_MSK        (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
  68#define HGC_DQE_ECC_ADDR                0x144
  69#define HGC_DQE_ECC_1B_ADDR_OFF 0
  70#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
  71#define HGC_DQE_ECC_MB_ADDR_OFF 16
  72#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
  73#define CHNL_INT_STATUS                 0x148
  74#define HGC_ITCT_ECC_ADDR               0x150
  75#define HGC_ITCT_ECC_1B_ADDR_OFF                0
  76#define HGC_ITCT_ECC_1B_ADDR_MSK                (0x3ff << \
  77                                                 HGC_ITCT_ECC_1B_ADDR_OFF)
  78#define HGC_ITCT_ECC_MB_ADDR_OFF                16
  79#define HGC_ITCT_ECC_MB_ADDR_MSK                (0x3ff << \
  80                                                 HGC_ITCT_ECC_MB_ADDR_OFF)
  81#define HGC_AXI_FIFO_ERR_INFO  0x154
  82#define AXI_ERR_INFO_OFF               0
  83#define AXI_ERR_INFO_MSK               (0xff << AXI_ERR_INFO_OFF)
  84#define FIFO_ERR_INFO_OFF              8
  85#define FIFO_ERR_INFO_MSK              (0xff << FIFO_ERR_INFO_OFF)
  86#define INT_COAL_EN                     0x19c
  87#define OQ_INT_COAL_TIME                0x1a0
  88#define OQ_INT_COAL_CNT                 0x1a4
  89#define ENT_INT_COAL_TIME               0x1a8
  90#define ENT_INT_COAL_CNT                0x1ac
  91#define OQ_INT_SRC                      0x1b0
  92#define OQ_INT_SRC_MSK                  0x1b4
  93#define ENT_INT_SRC1                    0x1b8
  94#define ENT_INT_SRC1_D2H_FIS_CH0_OFF    0
  95#define ENT_INT_SRC1_D2H_FIS_CH0_MSK    (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
  96#define ENT_INT_SRC1_D2H_FIS_CH1_OFF    8
  97#define ENT_INT_SRC1_D2H_FIS_CH1_MSK    (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
  98#define ENT_INT_SRC2                    0x1bc
  99#define ENT_INT_SRC3                    0x1c0
 100#define ENT_INT_SRC3_WP_DEPTH_OFF               8
 101#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF      9
 102#define ENT_INT_SRC3_RP_DEPTH_OFF               10
 103#define ENT_INT_SRC3_AXI_OFF                    11
 104#define ENT_INT_SRC3_FIFO_OFF                   12
 105#define ENT_INT_SRC3_LM_OFF                             14
 106#define ENT_INT_SRC3_ITC_INT_OFF        15
 107#define ENT_INT_SRC3_ITC_INT_MSK        (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
 108#define ENT_INT_SRC3_ABT_OFF            16
 109#define ENT_INT_SRC3_DQE_POISON_OFF     18
 110#define ENT_INT_SRC3_IOST_POISON_OFF    19
 111#define ENT_INT_SRC3_ITCT_POISON_OFF    20
 112#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF        21
 113#define ENT_INT_SRC_MSK1                0x1c4
 114#define ENT_INT_SRC_MSK2                0x1c8
 115#define ENT_INT_SRC_MSK3                0x1cc
 116#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF  31
 117#define CHNL_PHYUPDOWN_INT_MSK          0x1d0
 118#define CHNL_ENT_INT_MSK                        0x1d4
 119#define HGC_COM_INT_MSK                         0x1d8
 120#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK  (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
 121#define SAS_ECC_INTR                    0x1e8
 122#define SAS_ECC_INTR_DQE_ECC_1B_OFF             0
 123#define SAS_ECC_INTR_DQE_ECC_MB_OFF             1
 124#define SAS_ECC_INTR_IOST_ECC_1B_OFF    2
 125#define SAS_ECC_INTR_IOST_ECC_MB_OFF    3
 126#define SAS_ECC_INTR_ITCT_ECC_1B_OFF    4
 127#define SAS_ECC_INTR_ITCT_ECC_MB_OFF    5
 128#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF        6
 129#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF        7
 130#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF        8
 131#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF        9
 132#define SAS_ECC_INTR_CQE_ECC_1B_OFF             10
 133#define SAS_ECC_INTR_CQE_ECC_MB_OFF             11
 134#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF        12
 135#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF        13
 136#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF        14
 137#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF        15
 138#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF        16
 139#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF        17
 140#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF        18
 141#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF        19
 142#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF         20
 143#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF         21
 144#define SAS_ECC_INTR_MSK                0x1ec
 145#define HGC_ERR_STAT_EN                 0x238
 146#define CQE_SEND_CNT                    0x248
 147#define DLVRY_Q_0_BASE_ADDR_LO          0x260
 148#define DLVRY_Q_0_BASE_ADDR_HI          0x264
 149#define DLVRY_Q_0_DEPTH                 0x268
 150#define DLVRY_Q_0_WR_PTR                0x26c
 151#define DLVRY_Q_0_RD_PTR                0x270
 152#define HYPER_STREAM_ID_EN_CFG          0xc80
 153#define OQ0_INT_SRC_MSK                 0xc90
 154#define COMPL_Q_0_BASE_ADDR_LO          0x4e0
 155#define COMPL_Q_0_BASE_ADDR_HI          0x4e4
 156#define COMPL_Q_0_DEPTH                 0x4e8
 157#define COMPL_Q_0_WR_PTR                0x4ec
 158#define COMPL_Q_0_RD_PTR                0x4f0
 159#define HGC_RXM_DFX_STATUS14            0xae8
 160#define HGC_RXM_DFX_STATUS14_MEM0_OFF   0
 161#define HGC_RXM_DFX_STATUS14_MEM0_MSK   (0x1ff << \
 162                                         HGC_RXM_DFX_STATUS14_MEM0_OFF)
 163#define HGC_RXM_DFX_STATUS14_MEM1_OFF   9
 164#define HGC_RXM_DFX_STATUS14_MEM1_MSK   (0x1ff << \
 165                                         HGC_RXM_DFX_STATUS14_MEM1_OFF)
 166#define HGC_RXM_DFX_STATUS14_MEM2_OFF   18
 167#define HGC_RXM_DFX_STATUS14_MEM2_MSK   (0x1ff << \
 168                                         HGC_RXM_DFX_STATUS14_MEM2_OFF)
 169#define HGC_RXM_DFX_STATUS15            0xaec
 170#define HGC_RXM_DFX_STATUS15_MEM3_OFF   0
 171#define HGC_RXM_DFX_STATUS15_MEM3_MSK   (0x1ff << \
 172                                         HGC_RXM_DFX_STATUS15_MEM3_OFF)
 173#define AWQOS_AWCACHE_CFG       0xc84
 174#define ARQOS_ARCACHE_CFG       0xc88
 175#define HILINK_ERR_DFX          0xe04
 176#define SAS_GPIO_CFG_0          0x1000
 177#define SAS_GPIO_CFG_1          0x1004
 178#define SAS_GPIO_TX_0_1 0x1040
 179#define SAS_CFG_DRIVE_VLD       0x1070
 180
 181/* phy registers requiring init */
 182#define PORT_BASE                       (0x2000)
 183#define PHY_CFG                         (PORT_BASE + 0x0)
 184#define HARD_PHY_LINKRATE               (PORT_BASE + 0x4)
 185#define PHY_CFG_ENA_OFF                 0
 186#define PHY_CFG_ENA_MSK                 (0x1 << PHY_CFG_ENA_OFF)
 187#define PHY_CFG_DC_OPT_OFF              2
 188#define PHY_CFG_DC_OPT_MSK              (0x1 << PHY_CFG_DC_OPT_OFF)
 189#define PHY_CFG_PHY_RST_OFF             3
 190#define PHY_CFG_PHY_RST_MSK             (0x1 << PHY_CFG_PHY_RST_OFF)
 191#define PROG_PHY_LINK_RATE              (PORT_BASE + 0x8)
 192#define PHY_CTRL                        (PORT_BASE + 0x14)
 193#define PHY_CTRL_RESET_OFF              0
 194#define PHY_CTRL_RESET_MSK              (0x1 << PHY_CTRL_RESET_OFF)
 195#define CMD_HDR_PIR_OFF                 8
 196#define CMD_HDR_PIR_MSK                 (0x1 << CMD_HDR_PIR_OFF)
 197#define SERDES_CFG                      (PORT_BASE + 0x1c)
 198#define SL_CFG                          (PORT_BASE + 0x84)
 199#define AIP_LIMIT                       (PORT_BASE + 0x90)
 200#define SL_CONTROL                      (PORT_BASE + 0x94)
 201#define SL_CONTROL_NOTIFY_EN_OFF        0
 202#define SL_CONTROL_NOTIFY_EN_MSK        (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
 203#define SL_CTA_OFF              17
 204#define SL_CTA_MSK              (0x1 << SL_CTA_OFF)
 205#define RX_PRIMS_STATUS                 (PORT_BASE + 0x98)
 206#define RX_BCAST_CHG_OFF                1
 207#define RX_BCAST_CHG_MSK                (0x1 << RX_BCAST_CHG_OFF)
 208#define TX_ID_DWORD0                    (PORT_BASE + 0x9c)
 209#define TX_ID_DWORD1                    (PORT_BASE + 0xa0)
 210#define TX_ID_DWORD2                    (PORT_BASE + 0xa4)
 211#define TX_ID_DWORD3                    (PORT_BASE + 0xa8)
 212#define TX_ID_DWORD4                    (PORT_BASE + 0xaC)
 213#define TX_ID_DWORD5                    (PORT_BASE + 0xb0)
 214#define TX_ID_DWORD6                    (PORT_BASE + 0xb4)
 215#define TXID_AUTO                               (PORT_BASE + 0xb8)
 216#define CT3_OFF         1
 217#define CT3_MSK         (0x1 << CT3_OFF)
 218#define TX_HARDRST_OFF          2
 219#define TX_HARDRST_MSK          (0x1 << TX_HARDRST_OFF)
 220#define RX_IDAF_DWORD0                  (PORT_BASE + 0xc4)
 221#define RXOP_CHECK_CFG_H                (PORT_BASE + 0xfc)
 222#define STP_LINK_TIMER                  (PORT_BASE + 0x120)
 223#define STP_LINK_TIMEOUT_STATE          (PORT_BASE + 0x124)
 224#define CON_CFG_DRIVER                  (PORT_BASE + 0x130)
 225#define SAS_SSP_CON_TIMER_CFG           (PORT_BASE + 0x134)
 226#define SAS_SMP_CON_TIMER_CFG           (PORT_BASE + 0x138)
 227#define SAS_STP_CON_TIMER_CFG           (PORT_BASE + 0x13c)
 228#define CHL_INT0                        (PORT_BASE + 0x1b4)
 229#define CHL_INT0_HOTPLUG_TOUT_OFF       0
 230#define CHL_INT0_HOTPLUG_TOUT_MSK       (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
 231#define CHL_INT0_SL_RX_BCST_ACK_OFF     1
 232#define CHL_INT0_SL_RX_BCST_ACK_MSK     (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
 233#define CHL_INT0_SL_PHY_ENABLE_OFF      2
 234#define CHL_INT0_SL_PHY_ENABLE_MSK      (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
 235#define CHL_INT0_NOT_RDY_OFF            4
 236#define CHL_INT0_NOT_RDY_MSK            (0x1 << CHL_INT0_NOT_RDY_OFF)
 237#define CHL_INT0_PHY_RDY_OFF            5
 238#define CHL_INT0_PHY_RDY_MSK            (0x1 << CHL_INT0_PHY_RDY_OFF)
 239#define CHL_INT1                        (PORT_BASE + 0x1b8)
 240#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15
 241#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16
 242#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17
 243#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18
 244#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
 245#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
 246#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
 247#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
 248#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF   23
 249#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF   24
 250#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF      26
 251#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF      27
 252#define CHL_INT2                        (PORT_BASE + 0x1bc)
 253#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF  0
 254#define CHL_INT2_RX_DISP_ERR_OFF        28
 255#define CHL_INT2_RX_CODE_ERR_OFF        29
 256#define CHL_INT2_RX_INVLD_DW_OFF        30
 257#define CHL_INT2_STP_LINK_TIMEOUT_OFF   31
 258#define CHL_INT0_MSK                    (PORT_BASE + 0x1c0)
 259#define CHL_INT1_MSK                    (PORT_BASE + 0x1c4)
 260#define CHL_INT2_MSK                    (PORT_BASE + 0x1c8)
 261#define SAS_EC_INT_COAL_TIME            (PORT_BASE + 0x1cc)
 262#define CHL_INT_COAL_EN                 (PORT_BASE + 0x1d0)
 263#define SAS_RX_TRAIN_TIMER              (PORT_BASE + 0x2a4)
 264#define PHY_CTRL_RDY_MSK                (PORT_BASE + 0x2b0)
 265#define PHYCTRL_NOT_RDY_MSK             (PORT_BASE + 0x2b4)
 266#define PHYCTRL_DWS_RESET_MSK           (PORT_BASE + 0x2b8)
 267#define PHYCTRL_PHY_ENA_MSK             (PORT_BASE + 0x2bc)
 268#define SL_RX_BCAST_CHK_MSK             (PORT_BASE + 0x2c0)
 269#define PHYCTRL_OOB_RESTART_MSK         (PORT_BASE + 0x2c4)
 270#define DMA_TX_STATUS                   (PORT_BASE + 0x2d0)
 271#define DMA_TX_STATUS_BUSY_OFF          0
 272#define DMA_TX_STATUS_BUSY_MSK          (0x1 << DMA_TX_STATUS_BUSY_OFF)
 273#define DMA_RX_STATUS                   (PORT_BASE + 0x2e8)
 274#define DMA_RX_STATUS_BUSY_OFF          0
 275#define DMA_RX_STATUS_BUSY_MSK          (0x1 << DMA_RX_STATUS_BUSY_OFF)
 276
 277#define COARSETUNE_TIME                 (PORT_BASE + 0x304)
 278#define ERR_CNT_DWS_LOST                (PORT_BASE + 0x380)
 279#define ERR_CNT_RESET_PROB              (PORT_BASE + 0x384)
 280#define ERR_CNT_INVLD_DW                (PORT_BASE + 0x390)
 281#define ERR_CNT_CODE_ERR                (PORT_BASE + 0x394)
 282#define ERR_CNT_DISP_ERR                (PORT_BASE + 0x398)
 283
 284#define DEFAULT_ITCT_HW         2048 /* reset value, not reprogrammed */
 285#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
 286#error Max ITCT exceeded
 287#endif
 288
 289#define AXI_MASTER_CFG_BASE             (0x5000)
 290#define AM_CTRL_GLOBAL                  (0x0)
 291#define AM_CTRL_SHUTDOWN_REQ_OFF        0
 292#define AM_CTRL_SHUTDOWN_REQ_MSK        (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
 293#define AM_CURR_TRANS_RETURN    (0x150)
 294
 295#define AM_CFG_MAX_TRANS                (0x5010)
 296#define AM_CFG_SINGLE_PORT_MAX_TRANS    (0x5014)
 297#define AXI_CFG                                 (0x5100)
 298#define AM_ROB_ECC_ERR_ADDR             (0x510c)
 299#define AM_ROB_ECC_ERR_ADDR_OFF 0
 300#define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff
 301
  302/* RAS registers needing init */
 303#define RAS_BASE                (0x6000)
 304#define SAS_RAS_INTR0                   (RAS_BASE)
 305#define SAS_RAS_INTR1                   (RAS_BASE + 0x04)
 306#define SAS_RAS_INTR0_MASK              (RAS_BASE + 0x08)
 307#define SAS_RAS_INTR1_MASK              (RAS_BASE + 0x0c)
 308#define CFG_SAS_RAS_INTR_MASK           (RAS_BASE + 0x1c)
 309#define SAS_RAS_INTR2                   (RAS_BASE + 0x20)
 310#define SAS_RAS_INTR2_MASK              (RAS_BASE + 0x24)
 311
 312/* HW dma structures */
 313/* Delivery queue header */
 314/* dw0 */
 315#define CMD_HDR_ABORT_FLAG_OFF          0
 316#define CMD_HDR_ABORT_FLAG_MSK          (0x3 << CMD_HDR_ABORT_FLAG_OFF)
 317#define CMD_HDR_ABORT_DEVICE_TYPE_OFF   2
 318#define CMD_HDR_ABORT_DEVICE_TYPE_MSK   (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
 319#define CMD_HDR_RESP_REPORT_OFF         5
 320#define CMD_HDR_RESP_REPORT_MSK         (0x1 << CMD_HDR_RESP_REPORT_OFF)
 321#define CMD_HDR_TLR_CTRL_OFF            6
 322#define CMD_HDR_TLR_CTRL_MSK            (0x3 << CMD_HDR_TLR_CTRL_OFF)
 323#define CMD_HDR_PORT_OFF                18
 324#define CMD_HDR_PORT_MSK                (0xf << CMD_HDR_PORT_OFF)
 325#define CMD_HDR_PRIORITY_OFF            27
 326#define CMD_HDR_PRIORITY_MSK            (0x1 << CMD_HDR_PRIORITY_OFF)
 327#define CMD_HDR_CMD_OFF                 29
 328#define CMD_HDR_CMD_MSK                 (0x7 << CMD_HDR_CMD_OFF)
 329/* dw1 */
 330#define CMD_HDR_UNCON_CMD_OFF   3
 331#define CMD_HDR_DIR_OFF                 5
 332#define CMD_HDR_DIR_MSK                 (0x3 << CMD_HDR_DIR_OFF)
 333#define CMD_HDR_RESET_OFF               7
 334#define CMD_HDR_RESET_MSK               (0x1 << CMD_HDR_RESET_OFF)
 335#define CMD_HDR_VDTL_OFF                10
 336#define CMD_HDR_VDTL_MSK                (0x1 << CMD_HDR_VDTL_OFF)
 337#define CMD_HDR_FRAME_TYPE_OFF          11
 338#define CMD_HDR_FRAME_TYPE_MSK          (0x1f << CMD_HDR_FRAME_TYPE_OFF)
 339#define CMD_HDR_DEV_ID_OFF              16
 340#define CMD_HDR_DEV_ID_MSK              (0xffff << CMD_HDR_DEV_ID_OFF)
 341/* dw2 */
 342#define CMD_HDR_CFL_OFF                 0
 343#define CMD_HDR_CFL_MSK                 (0x1ff << CMD_HDR_CFL_OFF)
 344#define CMD_HDR_NCQ_TAG_OFF             10
 345#define CMD_HDR_NCQ_TAG_MSK             (0x1f << CMD_HDR_NCQ_TAG_OFF)
 346#define CMD_HDR_MRFL_OFF                15
 347#define CMD_HDR_MRFL_MSK                (0x1ff << CMD_HDR_MRFL_OFF)
 348#define CMD_HDR_SG_MOD_OFF              24
 349#define CMD_HDR_SG_MOD_MSK              (0x3 << CMD_HDR_SG_MOD_OFF)
 350/* dw3 */
 351#define CMD_HDR_IPTT_OFF                0
 352#define CMD_HDR_IPTT_MSK                (0xffff << CMD_HDR_IPTT_OFF)
 353/* dw6 */
 354#define CMD_HDR_DIF_SGL_LEN_OFF         0
 355#define CMD_HDR_DIF_SGL_LEN_MSK         (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
 356#define CMD_HDR_DATA_SGL_LEN_OFF        16
 357#define CMD_HDR_DATA_SGL_LEN_MSK        (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
 358/* dw7 */
 359#define CMD_HDR_ADDR_MODE_SEL_OFF               15
 360#define CMD_HDR_ADDR_MODE_SEL_MSK               (1 << CMD_HDR_ADDR_MODE_SEL_OFF)
 361#define CMD_HDR_ABORT_IPTT_OFF          16
 362#define CMD_HDR_ABORT_IPTT_MSK          (0xffff << CMD_HDR_ABORT_IPTT_OFF)
 363
 364/* Completion header */
 365/* dw0 */
 366#define CMPLT_HDR_CMPLT_OFF             0
 367#define CMPLT_HDR_CMPLT_MSK             (0x3 << CMPLT_HDR_CMPLT_OFF)
 368#define CMPLT_HDR_ERROR_PHASE_OFF   2
 369#define CMPLT_HDR_ERROR_PHASE_MSK   (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
 370#define CMPLT_HDR_RSPNS_XFRD_OFF        10
 371#define CMPLT_HDR_RSPNS_XFRD_MSK        (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
 372#define CMPLT_HDR_ERX_OFF               12
 373#define CMPLT_HDR_ERX_MSK               (0x1 << CMPLT_HDR_ERX_OFF)
 374#define CMPLT_HDR_ABORT_STAT_OFF        13
 375#define CMPLT_HDR_ABORT_STAT_MSK        (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
 376/* abort_stat */
 377#define STAT_IO_NOT_VALID               0x1
 378#define STAT_IO_NO_DEVICE               0x2
 379#define STAT_IO_COMPLETE                0x3
 380#define STAT_IO_ABORTED                 0x4
 381/* dw1 */
 382#define CMPLT_HDR_IPTT_OFF              0
 383#define CMPLT_HDR_IPTT_MSK              (0xffff << CMPLT_HDR_IPTT_OFF)
 384#define CMPLT_HDR_DEV_ID_OFF            16
 385#define CMPLT_HDR_DEV_ID_MSK            (0xffff << CMPLT_HDR_DEV_ID_OFF)
 386/* dw3 */
 387#define CMPLT_HDR_IO_IN_TARGET_OFF      17
 388#define CMPLT_HDR_IO_IN_TARGET_MSK      (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
 389
 390/* ITCT header */
 391/* qw0 */
 392#define ITCT_HDR_DEV_TYPE_OFF           0
 393#define ITCT_HDR_DEV_TYPE_MSK           (0x3 << ITCT_HDR_DEV_TYPE_OFF)
 394#define ITCT_HDR_VALID_OFF              2
 395#define ITCT_HDR_VALID_MSK              (0x1 << ITCT_HDR_VALID_OFF)
 396#define ITCT_HDR_MCR_OFF                5
 397#define ITCT_HDR_MCR_MSK                (0xf << ITCT_HDR_MCR_OFF)
 398#define ITCT_HDR_VLN_OFF                9
 399#define ITCT_HDR_VLN_MSK                (0xf << ITCT_HDR_VLN_OFF)
 400#define ITCT_HDR_SMP_TIMEOUT_OFF        16
 401#define ITCT_HDR_AWT_CONTINUE_OFF       25
 402#define ITCT_HDR_PORT_ID_OFF            28
 403#define ITCT_HDR_PORT_ID_MSK            (0xf << ITCT_HDR_PORT_ID_OFF)
 404/* qw2 */
 405#define ITCT_HDR_INLT_OFF               0
 406#define ITCT_HDR_INLT_MSK               (0xffffULL << ITCT_HDR_INLT_OFF)
 407#define ITCT_HDR_RTOLT_OFF              48
 408#define ITCT_HDR_RTOLT_MSK              (0xffffULL << ITCT_HDR_RTOLT_OFF)
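
/*
 * The *_OFF/*_MSK pairs above follow one convention: shift a value by
 * FIELD_OFF to place it in its field, and use FIELD_MSK to isolate the
 * field when reading back, e.g.
 *	val = (reg & ITCT_HDR_PORT_ID_MSK) >> ITCT_HDR_PORT_ID_OFF;
 */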
 409
 410struct hisi_sas_protect_iu_v3_hw {
 411        u32 dw0;
 412        u32 lbrtcv;
 413        u32 lbrtgv;
 414        u32 dw3;
 415        u32 dw4;
 416        u32 dw5;
 417        u32 rsv;
 418};
 419
 420struct hisi_sas_complete_v3_hdr {
 421        __le32 dw0;
 422        __le32 dw1;
 423        __le32 act;
 424        __le32 dw3;
 425};
 426
 427struct hisi_sas_err_record_v3 {
 428        /* dw0 */
 429        __le32 trans_tx_fail_type;
 430
 431        /* dw1 */
 432        __le32 trans_rx_fail_type;
 433
 434        /* dw2 */
 435        __le16 dma_tx_err_type;
 436        __le16 sipc_rx_err_type;
 437
 438        /* dw3 */
 439        __le32 dma_rx_err_type;
 440};
 441
 442#define RX_DATA_LEN_UNDERFLOW_OFF       6
 443#define RX_DATA_LEN_UNDERFLOW_MSK       (1 << RX_DATA_LEN_UNDERFLOW_OFF)
 444
 445#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
 446#define HISI_SAS_MSI_COUNT_V3_HW 32
 447
 448#define DIR_NO_DATA 0
 449#define DIR_TO_INI 1
 450#define DIR_TO_DEVICE 2
 451#define DIR_RESERVED 3
 452
 453#define FIS_CMD_IS_UNCONSTRAINED(fis) \
 454        ((fis.command == ATA_CMD_READ_LOG_EXT) || \
 455        (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
 456        ((fis.command == ATA_CMD_DEV_RESET) && \
 457        ((fis.control & ATA_SRST) != 0)))
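
/*
 * Note: FIS_CMD_IS_UNCONSTRAINED() evaluates "fis" more than once, so it is
 * only used with a plain struct, never an expression with side effects. The
 * commands it matches (READ LOG (DMA) EXT and an SRST device reset) are
 * presumably the ones flagged as unconstrained (CMD_HDR_UNCON_CMD_OFF) when
 * the ATA command header is built.
 */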
 458
 459#define T10_INSRT_EN_OFF    0
 460#define T10_INSRT_EN_MSK    (1 << T10_INSRT_EN_OFF)
 461#define T10_RMV_EN_OFF      1
 462#define T10_RMV_EN_MSK      (1 << T10_RMV_EN_OFF)
 463#define T10_RPLC_EN_OFF     2
 464#define T10_RPLC_EN_MSK     (1 << T10_RPLC_EN_OFF)
 465#define T10_CHK_EN_OFF      3
 466#define T10_CHK_EN_MSK      (1 << T10_CHK_EN_OFF)
 467#define INCR_LBRT_OFF       5
 468#define INCR_LBRT_MSK       (1 << INCR_LBRT_OFF)
 469#define USR_DATA_BLOCK_SZ_OFF   20
 470#define USR_DATA_BLOCK_SZ_MSK   (0x3 << USR_DATA_BLOCK_SZ_OFF)
 471#define T10_CHK_MSK_OFF     16
 472#define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF)
 473#define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF)
 474
 475#define BASE_VECTORS_V3_HW  16
 476#define MIN_AFFINE_VECTORS_V3_HW  (BASE_VECTORS_V3_HW + 1)
 477
 478enum {
 479        DSM_FUNC_ERR_HANDLE_MSI = 0,
 480};
 481
 482static bool hisi_sas_intr_conv;
 483MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
 484
 485/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
 486static int prot_mask;
 487module_param(prot_mask, int, 0);
  488MODULE_PARM_DESC(prot_mask, "host protection capabilities mask, def=0x0");
 489
 490static bool auto_affine_msi_experimental;
 491module_param(auto_affine_msi_experimental, bool, 0444);
 492MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
 493                 "default is off");
 494
 495static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
 496{
 497        void __iomem *regs = hisi_hba->regs + off;
 498
 499        return readl(regs);
 500}
 501
 502static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
 503{
 504        void __iomem *regs = hisi_hba->regs + off;
 505
 506        return readl_relaxed(regs);
 507}
 508
 509static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
 510{
 511        void __iomem *regs = hisi_hba->regs + off;
 512
 513        writel(val, regs);
 514}
 515
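/*
 * Per-phy registers are banked at a stride of 0x400 bytes: the PORT_BASE
 * offsets defined above address phy 0, and the helpers below add
 * 0x400 * phy_no to reach the bank of the requested phy.
 */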
 516static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
 517                                 u32 off, u32 val)
 518{
 519        void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
 520
 521        writel(val, regs);
 522}
 523
 524static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
 525                                      int phy_no, u32 off)
 526{
 527        void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
 528
 529        return readl(regs);
 530}
 531
 532#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us,          \
 533                                     timeout_us)                        \
 534({                                                                      \
 535        void __iomem *regs = hisi_hba->regs + off;                      \
 536        readl_poll_timeout(regs, val, cond, delay_us, timeout_us);      \
 537})
 538
 539#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us,   \
 540                                            timeout_us)                 \
 541({                                                                      \
 542        void __iomem *regs = hisi_hba->regs + off;                      \
 543        readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
 544})
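
/*
 * Both poll helpers pick up "hisi_hba" from the caller's scope rather than
 * taking it as a parameter. reset_hw_v3_hw() below uses the non-atomic
 * variant to wait for AXI_CFG to read back as zero (bus idle) before the
 * ACPI reset is issued.
 */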
 545
 546static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 547{
 548        int i;
 549
 550        /* Global registers init */
 551        hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
 552                         (u32)((1ULL << hisi_hba->queue_count) - 1));
 553        hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
 554        hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
 555        hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
 556        hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
 557        hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
 558        hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
 559        hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
 560        hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
 561                         hisi_sas_intr_conv);
 562        hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
 563        hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
 564        hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
 565        hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
 566        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
 567        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
 568        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
 569        hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
 570        hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
 571        hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
 572        hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
 573        hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
 574        hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
 575        for (i = 0; i < hisi_hba->queue_count; i++)
 576                hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
 577
 578        hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
 579
 580        for (i = 0; i < hisi_hba->n_phy; i++) {
 581                struct hisi_sas_phy *phy = &hisi_hba->phy[i];
 582                struct asd_sas_phy *sas_phy = &phy->sas_phy;
 583                u32 prog_phy_link_rate = 0x800;
 584
 585                if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
 586                                SAS_LINK_RATE_1_5_GBPS)) {
 587                        prog_phy_link_rate = 0x855;
 588                } else {
 589                        enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
 590
 591                        prog_phy_link_rate =
 592                                hisi_sas_get_prog_phy_linkrate_mask(max) |
 593                                0x800;
 594                }
 595                hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
 596                        prog_phy_link_rate);
 597                hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
 598                hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
 599                hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
 600                hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
 601                hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
 602                hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
 603                hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
 604                hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
 605                hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
 606                hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
 607                hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
 608                hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
 609                hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
 610                hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
 611                hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
 612                hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
 613                hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
 614                hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
 615                                     0x30f4240);
  616                /* used for 12G negotiation */
 617                hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
 618                hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
 619        }
 620
 621        for (i = 0; i < hisi_hba->queue_count; i++) {
 622                /* Delivery queue */
 623                hisi_sas_write32(hisi_hba,
 624                                 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
 625                                 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
 626
 627                hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
 628                                 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
 629
 630                hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
 631                                 HISI_SAS_QUEUE_SLOTS);
 632
 633                /* Completion queue */
 634                hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
 635                                 upper_32_bits(hisi_hba->complete_hdr_dma[i]));
 636
 637                hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
 638                                 lower_32_bits(hisi_hba->complete_hdr_dma[i]));
 639
 640                hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
 641                                 HISI_SAS_QUEUE_SLOTS);
 642        }
 643
 644        /* itct */
 645        hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
 646                         lower_32_bits(hisi_hba->itct_dma));
 647
 648        hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
 649                         upper_32_bits(hisi_hba->itct_dma));
 650
 651        /* iost */
 652        hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
 653                         lower_32_bits(hisi_hba->iost_dma));
 654
 655        hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
 656                         upper_32_bits(hisi_hba->iost_dma));
 657
 658        /* breakpoint */
 659        hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
 660                         lower_32_bits(hisi_hba->breakpoint_dma));
 661
 662        hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
 663                         upper_32_bits(hisi_hba->breakpoint_dma));
 664
 665        /* SATA broken msg */
 666        hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
 667                         lower_32_bits(hisi_hba->sata_breakpoint_dma));
 668
 669        hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
 670                         upper_32_bits(hisi_hba->sata_breakpoint_dma));
 671
 672        /* SATA initial fis */
 673        hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
 674                         lower_32_bits(hisi_hba->initial_fis_dma));
 675
 676        hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
 677                         upper_32_bits(hisi_hba->initial_fis_dma));
 678
 679        /* RAS registers init */
 680        hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
 681        hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
 682        hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
 683        hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);
 684
 685        /* LED registers init */
 686        hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
 687        hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
 688        hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
 689        /* Configure blink generator rate A to 1Hz and B to 4Hz */
 690        hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
 691        hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
 692}
 693
 694static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 695{
 696        u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
 697
 698        cfg &= ~PHY_CFG_DC_OPT_MSK;
 699        cfg |= 1 << PHY_CFG_DC_OPT_OFF;
 700        hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
 701}
 702
 703static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 704{
 705        struct sas_identify_frame identify_frame;
 706        u32 *identify_buffer;
 707
 708        memset(&identify_frame, 0, sizeof(identify_frame));
 709        identify_frame.dev_type = SAS_END_DEVICE;
 710        identify_frame.frame_type = 0;
 711        identify_frame._un1 = 1;
 712        identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
 713        identify_frame.target_bits = SAS_PROTOCOL_NONE;
 714        memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
 715        memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
 716        identify_frame.phy_id = phy_no;
 717        identify_buffer = (u32 *)(&identify_frame);
 718
 719        hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
 720                        __swab32(identify_buffer[0]));
 721        hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
 722                        __swab32(identify_buffer[1]));
 723        hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
 724                        __swab32(identify_buffer[2]));
 725        hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
 726                        __swab32(identify_buffer[3]));
 727        hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
 728                        __swab32(identify_buffer[4]));
 729        hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
 730                        __swab32(identify_buffer[5]));
 731}
 732
 733static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
 734                             struct hisi_sas_device *sas_dev)
 735{
 736        struct domain_device *device = sas_dev->sas_device;
 737        struct device *dev = hisi_hba->dev;
 738        u64 qw0, device_id = sas_dev->device_id;
 739        struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
 740        struct domain_device *parent_dev = device->parent;
 741        struct asd_sas_port *sas_port = device->port;
 742        struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
 743        u64 sas_addr;
 744
 745        memset(itct, 0, sizeof(*itct));
 746
 747        /* qw0 */
 748        qw0 = 0;
 749        switch (sas_dev->dev_type) {
 750        case SAS_END_DEVICE:
 751        case SAS_EDGE_EXPANDER_DEVICE:
 752        case SAS_FANOUT_EXPANDER_DEVICE:
 753                qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
 754                break;
 755        case SAS_SATA_DEV:
 756        case SAS_SATA_PENDING:
 757                if (parent_dev && dev_is_expander(parent_dev->dev_type))
 758                        qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
 759                else
 760                        qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
 761                break;
 762        default:
 763                dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
 764                         sas_dev->dev_type);
 765        }
 766
 767        qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
 768                (device->linkrate << ITCT_HDR_MCR_OFF) |
 769                (1 << ITCT_HDR_VLN_OFF) |
 770                (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
 771                (1 << ITCT_HDR_AWT_CONTINUE_OFF) |
 772                (port->id << ITCT_HDR_PORT_ID_OFF));
 773        itct->qw0 = cpu_to_le64(qw0);
 774
 775        /* qw1 */
 776        memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
 777        itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
 778
 779        /* qw2 */
 780        if (!dev_is_sata(device))
 781                itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
 782                                        (0x1ULL << ITCT_HDR_RTOLT_OFF));
 783}
 784
 785static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
 786                              struct hisi_sas_device *sas_dev)
 787{
 788        DECLARE_COMPLETION_ONSTACK(completion);
 789        u64 dev_id = sas_dev->device_id;
 790        struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
 791        u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
 792
 793        sas_dev->completion = &completion;
 794
 795        /* clear the itct interrupt state */
 796        if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
 797                hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 798                                 ENT_INT_SRC3_ITC_INT_MSK);
 799
 800        /* clear the itct table */
 801        reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
 802        hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
 803
 804        wait_for_completion(sas_dev->completion);
 805        memset(itct, 0, sizeof(struct hisi_sas_itct));
 806}
 807
 808static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
 809                                struct domain_device *device)
 810{
 811        struct hisi_sas_slot *slot, *slot2;
 812        struct hisi_sas_device *sas_dev = device->lldd_dev;
 813        u32 cfg_abt_set_query_iptt;
 814
 815        cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
 816                CFG_ABT_SET_QUERY_IPTT);
 817        list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
 818                cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
 819                cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
 820                        (slot->idx << CFG_SET_ABORTED_IPTT_OFF);
 821                hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
 822                        cfg_abt_set_query_iptt);
 823        }
 824        cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
 825        hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
 826                cfg_abt_set_query_iptt);
 827        hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
 828                                        1 << CFG_ABT_SET_IPTT_DONE_OFF);
 829}
 830
 831static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
 832{
 833        struct device *dev = hisi_hba->dev;
 834        int ret;
 835        u32 val;
 836
 837        hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
 838
 839        /* Disable all of the PHYs */
 840        hisi_sas_stop_phys(hisi_hba);
 841        udelay(50);
 842
 843        /* Ensure axi bus idle */
 844        ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
 845                                           20000, 1000000);
 846        if (ret) {
 847                dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
 848                return -EIO;
 849        }
 850
 851        if (ACPI_HANDLE(dev)) {
 852                acpi_status s;
 853
 854                s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
 855                if (ACPI_FAILURE(s)) {
 856                        dev_err(dev, "Reset failed\n");
 857                        return -EIO;
 858                }
 859        } else {
 860                dev_err(dev, "no reset method!\n");
 861                return -EINVAL;
 862        }
 863
 864        return 0;
 865}
 866
 867static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
 868{
 869        struct device *dev = hisi_hba->dev;
 870        union acpi_object *obj;
 871        guid_t guid;
 872        int rc;
 873
 874        rc = reset_hw_v3_hw(hisi_hba);
 875        if (rc) {
  876                dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
 877                return rc;
 878        }
 879
 880        msleep(100);
 881        init_reg_v3_hw(hisi_hba);
 882
 883        if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
 884                dev_err(dev, "Parse GUID failed\n");
 885                return -EINVAL;
 886        }
 887
  888        /* Switch over to MSI handling, from the PCI AER default */
 889        obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
 890                                DSM_FUNC_ERR_HANDLE_MSI, NULL);
 891        if (!obj)
 892                dev_warn(dev, "Switch over to MSI handling failed\n");
 893        else
 894                ACPI_FREE(obj);
 895
 896        return 0;
 897}
 898
 899static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 900{
 901        u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
 902
 903        cfg |= PHY_CFG_ENA_MSK;
 904        cfg &= ~PHY_CFG_PHY_RST_MSK;
 905        hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
 906}
 907
 908static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 909{
 910        u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
 911        u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
 912        static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
 913                               BIT(CHL_INT2_RX_CODE_ERR_OFF) |
 914                               BIT(CHL_INT2_RX_INVLD_DW_OFF);
 915        u32 state;
 916
 917        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);
 918
 919        cfg &= ~PHY_CFG_ENA_MSK;
 920        hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
 921
 922        mdelay(50);
 923
 924        state = hisi_sas_read32(hisi_hba, PHY_STATE);
 925        if (state & BIT(phy_no)) {
 926                cfg |= PHY_CFG_PHY_RST_MSK;
 927                hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
 928        }
 929
 930        udelay(1);
 931
 932        hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
 933        hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
 934        hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
 935
 936        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
 937        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
 938}
 939
 940static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 941{
 942        config_id_frame_v3_hw(hisi_hba, phy_no);
 943        config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
 944        enable_phy_v3_hw(hisi_hba, phy_no);
 945}
 946
 947static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 948{
 949        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 950        u32 txid_auto;
 951
 952        hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 953        if (phy->identify.device_type == SAS_END_DEVICE) {
 954                txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 955                hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
 956                                        txid_auto | TX_HARDRST_MSK);
 957        }
 958        msleep(100);
 959        hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 960}
 961
 962static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
 963{
 964        return SAS_LINK_RATE_12_0_GBPS;
 965}
 966
 967static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
 968{
 969        int i;
 970
 971        for (i = 0; i < hisi_hba->n_phy; i++) {
 972                struct hisi_sas_phy *phy = &hisi_hba->phy[i];
 973                struct asd_sas_phy *sas_phy = &phy->sas_phy;
 974
 975                if (!sas_phy->phy->enabled)
 976                        continue;
 977
 978                hisi_sas_phy_enable(hisi_hba, i, 1);
 979        }
 980}
 981
 982static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 983{
 984        u32 sl_control;
 985
 986        sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
 987        sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
 988        hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
 989        msleep(1);
 990        sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
 991        sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
 992        hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
 993}
 994
 995static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
 996{
 997        int i, bitmap = 0;
 998        u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
 999        u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
1000
1001        for (i = 0; i < hisi_hba->n_phy; i++)
1002                if (phy_state & BIT(i))
1003                        if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
1004                                bitmap |= BIT(i);
1005
1006        return bitmap;
1007}
1008
1009/**
 1010 * The call path to this function, up to writing the delivery queue
 1011 * write pointer, should be safe from interruption.
1012 */
1013static int
1014get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
1015{
1016        struct device *dev = hisi_hba->dev;
1017        int queue = dq->id;
1018        u32 r, w;
1019
1020        w = dq->wr_point;
1021        r = hisi_sas_read32_relaxed(hisi_hba,
1022                                DLVRY_Q_0_RD_PTR + (queue * 0x14));
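        /*
         * One slot is always left unused so that "read == write + 1" marks a
         * full ring and can be told apart from an empty one.
         */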
1023        if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
1024                dev_warn(dev, "full queue=%d r=%d w=%d\n",
1025                         queue, r, w);
1026                return -EAGAIN;
1027        }
1028
1029        dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
1030
1031        return w;
1032}
1033
1034static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
1035{
1036        struct hisi_hba *hisi_hba = dq->hisi_hba;
1037        struct hisi_sas_slot *s, *s1, *s2 = NULL;
1038        int dlvry_queue = dq->id;
1039        int wp;
1040
1041        list_for_each_entry_safe(s, s1, &dq->list, delivery) {
1042                if (!s->ready)
1043                        break;
1044                s2 = s;
1045                list_del(&s->delivery);
1046        }
1047
1048        if (!s2)
1049                return;
1050
1051        /*
 1052         * Ensure that slot memory written on other CPUs is observed.
1053         */
1054        smp_rmb();
1055        wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
1056
1057        hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
1058}
1059
1060static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
1061                              struct hisi_sas_slot *slot,
1062                              struct hisi_sas_cmd_hdr *hdr,
1063                              struct scatterlist *scatter,
1064                              int n_elem)
1065{
1066        struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
1067        struct scatterlist *sg;
1068        int i;
1069
1070        for_each_sg(scatter, sg, n_elem, i) {
1071                struct hisi_sas_sge *entry = &sge_page->sge[i];
1072
1073                entry->addr = cpu_to_le64(sg_dma_address(sg));
1074                entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
1075                entry->data_len = cpu_to_le32(sg_dma_len(sg));
1076                entry->data_off = 0;
1077        }
1078
1079        hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
1080
1081        hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
1082}
1083
1084static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
1085                                   struct hisi_sas_slot *slot,
1086                                   struct hisi_sas_cmd_hdr *hdr,
1087                                   struct scatterlist *scatter,
1088                                   int n_elem)
1089{
1090        struct hisi_sas_sge_dif_page *sge_dif_page;
1091        struct scatterlist *sg;
1092        int i;
1093
1094        sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);
1095
1096        for_each_sg(scatter, sg, n_elem, i) {
1097                struct hisi_sas_sge *entry = &sge_dif_page->sge[i];
1098
1099                entry->addr = cpu_to_le64(sg_dma_address(sg));
1100                entry->page_ctrl_0 = 0;
1101                entry->page_ctrl_1 = 0;
1102                entry->data_len = cpu_to_le32(sg_dma_len(sg));
1103                entry->data_off = 0;
1104        }
1105
1106        hdr->dif_prd_table_addr =
1107                cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));
1108
1109        hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
1110}
1111
1112static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
1113{
1114        unsigned char prot_flags = scsi_cmnd->prot_flags;
1115
1116        if (prot_flags & SCSI_PROT_REF_CHECK)
1117                return T10_CHK_APP_TAG_MSK;
1118        return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
1119}
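
/*
 * The mask returned above is OR'd into dw4 of the protection IU by
 * fill_prot_v3_hw() below; it selects which PI tags (reference and/or
 * application) the controller is asked to verify. The exact bit meanings are
 * assumed from the T10_CHK_*_MSK names.
 */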
1120
1121static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
1122                            struct hisi_sas_protect_iu_v3_hw *prot)
1123{
1124        unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
1125        unsigned int interval = scsi_prot_interval(scsi_cmnd);
1126        u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
1127
1128        switch (prot_op) {
1129        case SCSI_PROT_READ_INSERT:
1130                prot->dw0 |= T10_INSRT_EN_MSK;
1131                prot->lbrtgv = lbrt_chk_val;
1132                break;
1133        case SCSI_PROT_READ_STRIP:
1134                prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
1135                prot->lbrtcv = lbrt_chk_val;
1136                prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
1137                break;
1138        case SCSI_PROT_READ_PASS:
1139                prot->dw0 |= T10_CHK_EN_MSK;
1140                prot->lbrtcv = lbrt_chk_val;
1141                prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
1142                break;
1143        case SCSI_PROT_WRITE_INSERT:
1144                prot->dw0 |= T10_INSRT_EN_MSK;
1145                prot->lbrtgv = lbrt_chk_val;
1146                break;
1147        case SCSI_PROT_WRITE_STRIP:
1148                prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
1149                prot->lbrtcv = lbrt_chk_val;
1150                break;
1151        case SCSI_PROT_WRITE_PASS:
1152                prot->dw0 |= T10_CHK_EN_MSK;
1153                prot->lbrtcv = lbrt_chk_val;
1154                prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
1155                break;
1156        default:
1157                WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
1158                break;
1159        }
1160
1161        switch (interval) {
1162        case 512:
1163                break;
1164        case 4096:
1165                prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
1166                break;
1167        case 520:
1168                prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
1169                break;
1170        default:
1171                WARN(1, "protection interval (0x%x) invalid\n",
1172                     interval);
1173                break;
1174        }
1175
1176        prot->dw0 |= INCR_LBRT_MSK;
1177}
1178
1179static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
1180                          struct hisi_sas_slot *slot)
1181{
1182        struct sas_task *task = slot->task;
1183        struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1184        struct domain_device *device = task->dev;
1185        struct hisi_sas_device *sas_dev = device->lldd_dev;
1186        struct hisi_sas_port *port = slot->port;
1187        struct sas_ssp_task *ssp_task = &task->ssp_task;
1188        struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
1189        struct hisi_sas_tmf_task *tmf = slot->tmf;
1190        int has_data = 0, priority = !!tmf;
1191        unsigned char prot_op;
1192        u8 *buf_cmd;
1193        u32 dw1 = 0, dw2 = 0, len = 0;
1194
1195        hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
1196                               (2 << CMD_HDR_TLR_CTRL_OFF) |
1197                               (port->id << CMD_HDR_PORT_OFF) |
1198                               (priority << CMD_HDR_PRIORITY_OFF) |
1199                               (1 << CMD_HDR_CMD_OFF)); /* ssp */
1200
1201        dw1 = 1 << CMD_HDR_VDTL_OFF;
1202        if (tmf) {
1203                dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
1204                dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
1205        } else {
1206                prot_op = scsi_get_prot_op(scsi_cmnd);
1207                dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
1208                switch (scsi_cmnd->sc_data_direction) {
1209                case DMA_TO_DEVICE:
1210                        has_data = 1;
1211                        dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
1212                        break;
1213                case DMA_FROM_DEVICE:
1214                        has_data = 1;
1215                        dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
1216                        break;
1217                default:
1218                        dw1 &= ~CMD_HDR_DIR_MSK;
1219                }
1220        }
1221
1222        /* map itct entry */
1223        dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1224
1225        dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
1226              + 3) / 4) << CMD_HDR_CFL_OFF) |
1227              ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
1228              (2 << CMD_HDR_SG_MOD_OFF);
1229        hdr->dw2 = cpu_to_le32(dw2);
1230        hdr->transfer_tags = cpu_to_le32(slot->idx);
1231
1232        if (has_data) {
1233                prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
1234                                   slot->n_elem);
1235
1236                if (scsi_prot_sg_count(scsi_cmnd))
1237                        prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
1238                                               scsi_prot_sglist(scsi_cmnd),
1239                                               slot->n_elem_dif);
1240        }
1241
1242        hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
1243        hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
1244
1245        buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
1246                sizeof(struct ssp_frame_hdr);
1247
1248        memcpy(buf_cmd, &task->ssp_task.LUN, 8);
1249        if (!tmf) {
1250                buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
1251                memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
1252        } else {
1253                buf_cmd[10] = tmf->tmf;
1254                switch (tmf->tmf) {
1255                case TMF_ABORT_TASK:
1256                case TMF_QUERY_TASK:
1257                        buf_cmd[12] =
1258                                (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
1259                        buf_cmd[13] =
1260                                tmf->tag_of_task_to_be_managed & 0xff;
1261                        break;
1262                default:
1263                        break;
1264                }
1265        }
1266
1267        if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
1268                struct hisi_sas_protect_iu_v3_hw prot;
1269                u8 *buf_cmd_prot;
1270
1271                hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
1272                dw1 |= CMD_HDR_PIR_MSK;
1273                buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
1274                               sizeof(struct ssp_frame_hdr) +
1275                               sizeof(struct ssp_command_iu);
1276
1277                memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
1278                fill_prot_v3_hw(scsi_cmnd, &prot);
1279                memcpy(buf_cmd_prot, &prot,
1280                       sizeof(struct hisi_sas_protect_iu_v3_hw));
1281                /*
1282                 * For READ, we need length of info read to memory, while for
1283                 * WRITE we need length of data written to the disk.
1284                 */
1285                if (prot_op == SCSI_PROT_WRITE_INSERT ||
1286                    prot_op == SCSI_PROT_READ_INSERT ||
1287                    prot_op == SCSI_PROT_WRITE_PASS ||
1288                    prot_op == SCSI_PROT_READ_PASS) {
1289                        unsigned int interval = scsi_prot_interval(scsi_cmnd);
1290                        unsigned int ilog2_interval = ilog2(interval);
1291
1292                        len = (task->total_xfer_len >> ilog2_interval) * 8;
1293                }
1294        }
1295
1296        hdr->dw1 = cpu_to_le32(dw1);
1297
1298        hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
1299}
1300
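/*
 * Build the command header for an SMP frame: the request is handed to the
 * hardware by the DMA address of its mapped scatterlist entry, while the
 * response is written into the slot's status buffer.
 */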
1301static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
1302                          struct hisi_sas_slot *slot)
1303{
1304        struct sas_task *task = slot->task;
1305        struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1306        struct domain_device *device = task->dev;
1307        struct hisi_sas_port *port = slot->port;
1308        struct scatterlist *sg_req;
1309        struct hisi_sas_device *sas_dev = device->lldd_dev;
1310        dma_addr_t req_dma_addr;
1311        unsigned int req_len;
1312
1313        /* req */
1314        sg_req = &task->smp_task.smp_req;
1315        req_len = sg_dma_len(sg_req);
1316        req_dma_addr = sg_dma_address(sg_req);
1317
1318        /* create header */
1319        /* dw0 */
1320        hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
1321                               (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
1322                               (2 << CMD_HDR_CMD_OFF)); /* smp */
1323
1324        /* map itct entry */
1325        hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
1326                               (1 << CMD_HDR_FRAME_TYPE_OFF) |
1327                               (DIR_NO_DATA << CMD_HDR_DIR_OFF));
1328
1329        /* dw2 */
1330        hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
1331                               (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
1332                               CMD_HDR_MRFL_OFF));
1333
1334        hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
1335
1336        hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
1337        hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
1338
1339}
1340
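/*
 * Build the command header for a SATA/STP task: pick the command type based
 * on whether the device sits behind an expander, encode the data direction
 * and ATA protocol in dw1, fold any NCQ tag into the FIS and dw2, then copy
 * the host-to-device FIS into the command table.
 */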
1341static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1342                          struct hisi_sas_slot *slot)
1343{
1344        struct sas_task *task = slot->task;
1345        struct domain_device *device = task->dev;
1346        struct domain_device *parent_dev = device->parent;
1347        struct hisi_sas_device *sas_dev = device->lldd_dev;
1348        struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1349        struct asd_sas_port *sas_port = device->port;
1350        struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
1351        u8 *buf_cmd;
1352        int has_data = 0, hdr_tag = 0;
1353        u32 dw1 = 0, dw2 = 0;
1354
1355        hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
1356        if (parent_dev && dev_is_expander(parent_dev->dev_type))
1357                hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
1358        else
1359                hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
1360
1361        switch (task->data_dir) {
1362        case DMA_TO_DEVICE:
1363                has_data = 1;
1364                dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
1365                break;
1366        case DMA_FROM_DEVICE:
1367                has_data = 1;
1368                dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
1369                break;
1370        default:
1371                dw1 &= ~CMD_HDR_DIR_MSK;
1372        }
1373
1374        if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
1375                        (task->ata_task.fis.control & ATA_SRST))
1376                dw1 |= 1 << CMD_HDR_RESET_OFF;
1377
1378        dw1 |= (hisi_sas_get_ata_protocol(
1379                &task->ata_task.fis, task->data_dir))
1380                << CMD_HDR_FRAME_TYPE_OFF;
1381        dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1382
1383        if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
1384                dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
1385
1386        hdr->dw1 = cpu_to_le32(dw1);
1387
1388        /* dw2 */
1389        if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
1390                task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
1391                dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
1392        }
1393
1394        dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
1395                        2 << CMD_HDR_SG_MOD_OFF;
1396        hdr->dw2 = cpu_to_le32(dw2);
1397
1398        /* dw3 */
1399        hdr->transfer_tags = cpu_to_le32(slot->idx);
1400
1401        if (has_data)
1402                prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
1403                                        slot->n_elem);
1404
1405        hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
1406        hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
1407        hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
1408
1409        buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);
1410
1411        if (likely(!task->ata_task.device_control_reg_update))
1412                task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1413        /* fill in command FIS */
1414        memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1415}
1416
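/*
 * Build the command header for an internal abort: dw0 carries the abort
 * flag and device type, dw1 the target device ID, and dw7 the IPTT of the
 * command to be aborted.
 */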
1417static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
1418                struct hisi_sas_slot *slot,
1419                int device_id, int abort_flag, int tag_to_abort)
1420{
1421        struct sas_task *task = slot->task;
1422        struct domain_device *dev = task->dev;
1423        struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1424        struct hisi_sas_port *port = slot->port;
1425
1426        /* dw0 */
1427        hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
1428                               (port->id << CMD_HDR_PORT_OFF) |
1429                                   (dev_is_sata(dev)
1430                                        << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
1431                                        (abort_flag
1432                                         << CMD_HDR_ABORT_FLAG_OFF));
1433
1434        /* dw1 */
1435        hdr->dw1 = cpu_to_le32(device_id
1436                        << CMD_HDR_DEV_ID_OFF);
1437
1438        /* dw7 */
1439        hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
1440        hdr->transfer_tags = cpu_to_le32(slot->idx);
1441
1442}
1443
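/*
 * Handle a phy-up event: read the port id and negotiated link rate, then
 * parse either the initial D2H FIS (SATA) or the received IDENTIFY address
 * frame (SAS/SMP) before raising the HISI_PHYE_PHY_UP event.
 */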
1444static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1445{
1446        int i;
1447        irqreturn_t res;
1448        u32 context, port_id, link_rate;
1449        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1450        struct asd_sas_phy *sas_phy = &phy->sas_phy;
1451        struct device *dev = hisi_hba->dev;
1452        unsigned long flags;
1453
1454        del_timer(&phy->timer);
1455        hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
1456
1457        port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
1458        port_id = (port_id >> (4 * phy_no)) & 0xf;
1459        link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
1460        link_rate = (link_rate >> (phy_no * 4)) & 0xf;
1461
1462        if (port_id == 0xf) {
1463                dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
1464                res = IRQ_NONE;
1465                goto end;
1466        }
1467        sas_phy->linkrate = link_rate;
1468        phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1469
1470        /* Check for SATA dev */
1471        context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
1472        if (context & (1 << phy_no)) {
1473                struct hisi_sas_initial_fis *initial_fis;
1474                struct dev_to_host_fis *fis;
1475                u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
1476                struct Scsi_Host *shost = hisi_hba->shost;
1477
1478                dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
1479                initial_fis = &hisi_hba->initial_fis[phy_no];
1480                fis = &initial_fis->fis;
1481
1482                /* check ERR bit of Status Register */
1483                if (fis->status & ATA_ERR) {
1484                        dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
1485                                 phy_no, fis->status);
1486                        hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1487                        res = IRQ_NONE;
1488                        goto end;
1489                }
1490
1491                sas_phy->oob_mode = SATA_OOB_MODE;
1492                attached_sas_addr[0] = 0x50;
1493                attached_sas_addr[6] = shost->host_no;
1494                attached_sas_addr[7] = phy_no;
1495                memcpy(sas_phy->attached_sas_addr,
1496                       attached_sas_addr,
1497                       SAS_ADDR_SIZE);
1498                memcpy(sas_phy->frame_rcvd, fis,
1499                       sizeof(struct dev_to_host_fis));
1500                phy->phy_type |= PORT_TYPE_SATA;
1501                phy->identify.device_type = SAS_SATA_DEV;
1502                phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
1503                phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
1504        } else {
1505                u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
1506                struct sas_identify_frame *id =
1507                        (struct sas_identify_frame *)frame_rcvd;
1508
1509                dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
1510                for (i = 0; i < 6; i++) {
1511                        u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
1512                                               RX_IDAF_DWORD0 + (i * 4));
1513                        frame_rcvd[i] = __swab32(idaf);
1514                }
1515                sas_phy->oob_mode = SAS_OOB_MODE;
1516                memcpy(sas_phy->attached_sas_addr,
1517                       &id->sas_addr,
1518                       SAS_ADDR_SIZE);
1519                phy->phy_type |= PORT_TYPE_SAS;
1520                phy->identify.device_type = id->dev_type;
1521                phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
1522                if (phy->identify.device_type == SAS_END_DEVICE)
1523                        phy->identify.target_port_protocols =
1524                                SAS_PROTOCOL_SSP;
1525                else if (phy->identify.device_type != SAS_PHY_UNUSED)
1526                        phy->identify.target_port_protocols =
1527                                SAS_PROTOCOL_SMP;
1528        }
1529
1530        phy->port_id = port_id;
1531        phy->phy_attached = 1;
1532        hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
1533        res = IRQ_HANDLED;
1534        spin_lock_irqsave(&phy->lock, flags);
1535        if (phy->reset_completion) {
1536                phy->in_reset = 0;
1537                complete(phy->reset_completion);
1538        }
1539        spin_unlock_irqrestore(&phy->lock, flags);
1540end:
1541        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1542                             CHL_INT0_SL_PHY_ENABLE_MSK);
1543        hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
1544
1545        return res;
1546}
1547
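/*
 * Handle a phy-down event: report it via hisi_sas_phy_down(), clear SL_CTA
 * in SL_CONTROL, set CT3 in TXID_AUTO, then acknowledge and re-enable the
 * not-ready interrupt for the phy.
 */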
1548static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1549{
1550        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1551        u32 phy_state, sl_ctrl, txid_auto;
1552        struct device *dev = hisi_hba->dev;
1553
1554        del_timer(&phy->timer);
1555        hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
1556
1557        phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
1558        dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
1559        hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
1560
1561        sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
1562        hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
1563                                                sl_ctrl & ~SL_CTA_MSK);
1564
1565        txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
1566        hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
1567                                                txid_auto | CT3_MSK);
1568
1569        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
1570        hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
1571
1572        return IRQ_HANDLED;
1573}
1574
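/*
 * Handle a received BROADCAST primitive: notify libsas of the port event
 * unless a controller reset is in progress, then acknowledge the interrupt.
 */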
1575static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1576{
1577        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1578        struct asd_sas_phy *sas_phy = &phy->sas_phy;
1579        struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1580        u32 bcast_status;
1581
1582        hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
1583        bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
1584        if ((bcast_status & RX_BCAST_CHG_MSK) &&
1585            !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1586                sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
1587        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1588                             CHL_INT0_SL_RX_BCST_ACK_MSK);
1589        hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
1590
1591        return IRQ_HANDLED;
1592}
1593
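/*
 * Top-level handler for the phy up/down/broadcast interrupt: walk the
 * CHL_INT0 status bits per phy and dispatch to the phy-up, phy-down or
 * broadcast handlers depending on the current phy ready state.
 */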
1594static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
1595{
1596        struct hisi_hba *hisi_hba = p;
1597        u32 irq_msk;
1598        int phy_no = 0;
1599        irqreturn_t res = IRQ_NONE;
1600
1601        irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
1602                                & 0x11111111;
1603        while (irq_msk) {
1604                if (irq_msk  & 1) {
1605                        u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
1606                                                            CHL_INT0);
1607                        u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
1608                        int rdy = phy_state & (1 << phy_no);
1609
1610                        if (rdy) {
1611                                if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
1612                                        /* phy up */
1613                                        if (phy_up_v3_hw(phy_no, hisi_hba)
1614                                                        == IRQ_HANDLED)
1615                                                res = IRQ_HANDLED;
1616                                if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
1617                                        /* phy bcast */
1618                                        if (phy_bcast_v3_hw(phy_no, hisi_hba)
1619                                                        == IRQ_HANDLED)
1620                                                res = IRQ_HANDLED;
1621                        } else {
1622                                if (irq_value & CHL_INT0_NOT_RDY_MSK)
1623                                        /* phy down */
1624                                        if (phy_down_v3_hw(phy_no, hisi_hba)
1625                                                        == IRQ_HANDLED)
1626                                                res = IRQ_HANDLED;
1627                        }
1628                }
1629                irq_msk >>= 4;
1630                phy_no++;
1631        }
1632
1633        return res;
1634}
1635
1636static const struct hisi_sas_hw_error port_axi_error[] = {
1637        {
1638                .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
1639                .msg = "dmac_tx_ecc_bad_err",
1640        },
1641        {
1642                .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
1643                .msg = "dmac_rx_ecc_bad_err",
1644        },
1645        {
1646                .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
1647                .msg = "dma_tx_axi_wr_err",
1648        },
1649        {
1650                .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
1651                .msg = "dma_tx_axi_rd_err",
1652        },
1653        {
1654                .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
1655                .msg = "dma_rx_axi_wr_err",
1656        },
1657        {
1658                .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
1659                .msg = "dma_rx_axi_rd_err",
1660        },
1661        {
1662                .irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
1663                .msg = "dma_tx_fifo_err",
1664        },
1665        {
1666                .irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
1667                .msg = "dma_rx_fifo_err",
1668        },
1669        {
1670                .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
1671                .msg = "dma_tx_axi_ruser_err",
1672        },
1673        {
1674                .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
1675                .msg = "dma_rx_axi_ruser_err",
1676        },
1677};
1678
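/*
 * CHL_INT1 reports per-phy DMA/AXI/ECC errors; any of them is logged and
 * triggers a controller reset via the rst_work workqueue item.
 */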
1679static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1680{
1681        u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
1682        u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
1683        struct device *dev = hisi_hba->dev;
1684        int i;
1685
1686        irq_value &= ~irq_msk;
1687        if (!irq_value)
1688                return;
1689
1690        for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
1691                const struct hisi_sas_hw_error *error = &port_axi_error[i];
1692
1693                if (!(irq_value & error->irq_msk))
1694                        continue;
1695
1696                dev_err(dev, "%s error (phy%d 0x%x) found!\n",
1697                        error->msg, phy_no, irq_value);
1698                queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1699        }
1700
1701        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
1702}
1703
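/*
 * Accumulate the per-phy hardware error counters (dword sync loss, phy
 * reset problems, invalid dwords, disparity and code violation errors)
 * into the sas_phy / hisi_sas_phy statistics.
 */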
1704static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1705{
1706        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1707        struct asd_sas_phy *sas_phy = &phy->sas_phy;
1708        struct sas_phy *sphy = sas_phy->phy;
1709        unsigned long flags;
1710        u32 reg_value;
1711
1712        spin_lock_irqsave(&phy->lock, flags);
1713
1714        /* loss dword sync */
1715        reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
1716        sphy->loss_of_dword_sync_count += reg_value;
1717
1718        /* phy reset problem */
1719        reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
1720        sphy->phy_reset_problem_count += reg_value;
1721
1722        /* invalid dword */
1723        reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
1724        sphy->invalid_dword_count += reg_value;
1725
1726        /* disparity err */
1727        reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
1728        sphy->running_disparity_error_count += reg_value;
1729
1730        /* code violation error */
1731        reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
1732        phy->code_violation_err_count += reg_value;
1733
1734        spin_unlock_irqrestore(&phy->lock, flags);
1735}
1736
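/*
 * CHL_INT2 reports link-layer errors: identify-frame and STP link timeouts
 * trigger a link reset, while invalid dword / code violation / disparity
 * errors are counted and logged on silicon revisions newer than 0x20.
 */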
1737static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1738{
1739        u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
1740        u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
1741        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1742        struct pci_dev *pci_dev = hisi_hba->pci_dev;
1743        struct device *dev = hisi_hba->dev;
1744        static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
1745                        BIT(CHL_INT2_RX_CODE_ERR_OFF) |
1746                        BIT(CHL_INT2_RX_INVLD_DW_OFF);
1747
1748        irq_value &= ~irq_msk;
1749        if (!irq_value)
1750                return;
1751
1752        if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
1753                dev_warn(dev, "phy%d identify timeout\n", phy_no);
1754                hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1755        }
1756
1757        if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
1758                u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
1759                                STP_LINK_TIMEOUT_STATE);
1760
1761                dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
1762                         phy_no, reg_value);
1763                if (reg_value & BIT(4))
1764                        hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1765        }
1766
1767        if (pci_dev->revision > 0x20 && (irq_value & msk)) {
1768                struct asd_sas_phy *sas_phy = &phy->sas_phy;
1769                struct sas_phy *sphy = sas_phy->phy;
1770
1771                phy_get_events_v3_hw(hisi_hba, phy_no);
1772
1773                if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
1774                        dev_info(dev, "phy%d invalid dword cnt:   %u\n", phy_no,
1775                                 sphy->invalid_dword_count);
1776
1777                if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
1778                        dev_info(dev, "phy%d code violation cnt:  %u\n", phy_no,
1779                                 phy->code_violation_err_count);
1780
1781                if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
1782                        dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
1783                                 sphy->running_disparity_error_count);
1784        }
1785
1786        if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
1787            (pci_dev->revision == 0x20)) {
1788                u32 reg_value;
1789                int rc;
1790
1791                rc = hisi_sas_read32_poll_timeout_atomic(
1792                                HILINK_ERR_DFX, reg_value,
1793                                !((reg_value >> 8) & BIT(phy_no)),
1794                                1000, 10000);
1795                if (rc)
1796                        hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1797        }
1798
1799        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
1800}
1801
1802static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1803{
1804        u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);
1805
1806        if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
1807                hisi_sas_phy_oob_ready(hisi_hba, phy_no);
1808
1809        hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1810                             irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
1811                             & (~CHL_INT0_SL_PHY_ENABLE_MSK)
1812                             & (~CHL_INT0_NOT_RDY_MSK));
1813}
1814
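/*
 * Top-level channel interrupt handler: walk CHNL_INT_STATUS and dispatch
 * each phy's pending CHL_INT0/1/2 sources to the handlers above.
 */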
1815static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1816{
1817        struct hisi_hba *hisi_hba = p;
1818        u32 irq_msk;
1819        int phy_no = 0;
1820
1821        irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
1822                                & 0xeeeeeeee;
1823
1824        while (irq_msk) {
1825                if (irq_msk & (2 << (phy_no * 4)))
1826                        handle_chl_int0_v3_hw(hisi_hba, phy_no);
1827
1828                if (irq_msk & (4 << (phy_no * 4)))
1829                        handle_chl_int1_v3_hw(hisi_hba, phy_no);
1830
1831                if (irq_msk & (8 << (phy_no * 4)))
1832                        handle_chl_int2_v3_hw(hisi_hba, phy_no);
1833
1834                irq_msk &= ~(0xe << (phy_no * 4));
1835                phy_no++;
1836        }
1837
1838        return IRQ_HANDLED;
1839}
1840
1841static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
1842        {
1843                .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
1844                .msk = HGC_DQE_ECC_MB_ADDR_MSK,
1845                .shift = HGC_DQE_ECC_MB_ADDR_OFF,
1846                .msg = "hgc_dqe_eccbad_intr",
1847                .reg = HGC_DQE_ECC_ADDR,
1848        },
1849        {
1850                .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
1851                .msk = HGC_IOST_ECC_MB_ADDR_MSK,
1852                .shift = HGC_IOST_ECC_MB_ADDR_OFF,
1853                .msg = "hgc_iost_eccbad_intr",
1854                .reg = HGC_IOST_ECC_ADDR,
1855        },
1856        {
1857                .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
1858                .msk = HGC_ITCT_ECC_MB_ADDR_MSK,
1859                .shift = HGC_ITCT_ECC_MB_ADDR_OFF,
1860                .msg = "hgc_itct_eccbad_intr",
1861                .reg = HGC_ITCT_ECC_ADDR,
1862        },
1863        {
1864                .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
1865                .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
1866                .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
1867                .msg = "hgc_iostl_eccbad_intr",
1868                .reg = HGC_LM_DFX_STATUS2,
1869        },
1870        {
1871                .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
1872                .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
1873                .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
1874                .msg = "hgc_itctl_eccbad_intr",
1875                .reg = HGC_LM_DFX_STATUS2,
1876        },
1877        {
1878                .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
1879                .msk = HGC_CQE_ECC_MB_ADDR_MSK,
1880                .shift = HGC_CQE_ECC_MB_ADDR_OFF,
1881                .msg = "hgc_cqe_eccbad_intr",
1882                .reg = HGC_CQE_ECC_ADDR,
1883        },
1884        {
1885                .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
1886                .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
1887                .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
1888                .msg = "rxm_mem0_eccbad_intr",
1889                .reg = HGC_RXM_DFX_STATUS14,
1890        },
1891        {
1892                .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
1893                .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
1894                .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
1895                .msg = "rxm_mem1_eccbad_intr",
1896                .reg = HGC_RXM_DFX_STATUS14,
1897        },
1898        {
1899                .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
1900                .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
1901                .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
1902                .msg = "rxm_mem2_eccbad_intr",
1903                .reg = HGC_RXM_DFX_STATUS14,
1904        },
1905        {
1906                .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
1907                .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
1908                .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
1909                .msg = "rxm_mem3_eccbad_intr",
1910                .reg = HGC_RXM_DFX_STATUS15,
1911        },
1912        {
1913                .irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
1914                .msk = AM_ROB_ECC_ERR_ADDR_MSK,
1915                .shift = AM_ROB_ECC_ERR_ADDR_OFF,
1916                .msg = "ooo_ram_eccbad_intr",
1917                .reg = AM_ROB_ECC_ERR_ADDR,
1918        },
1919};
1920
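/*
 * For each multi-bit ECC error flagged in SAS_ECC_INTR, log the failing
 * memory address from the corresponding DFX register and queue a
 * controller reset.
 */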
1921static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
1922                                              u32 irq_value)
1923{
1924        struct device *dev = hisi_hba->dev;
1925        const struct hisi_sas_hw_error *ecc_error;
1926        u32 val;
1927        int i;
1928
1929        for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
1930                ecc_error = &multi_bit_ecc_errors[i];
1931                if (irq_value & ecc_error->irq_msk) {
1932                        val = hisi_sas_read32(hisi_hba, ecc_error->reg);
1933                        val &= ecc_error->msk;
1934                        val >>= ecc_error->shift;
1935                        dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
1936                                ecc_error->msg, irq_value, val);
1937                        queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1938                }
1939        }
1940}
1941
1942static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
1943{
1944        u32 irq_value, irq_msk;
1945
1946        irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
1947        hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
1948
1949        irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
1950        if (irq_value)
1951                multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);
1952
1953        hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
1954        hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
1955}
1956
1957static const struct hisi_sas_hw_error axi_error[] = {
1958        { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
1959        { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
1960        { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
1961        { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
1962        { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
1963        { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
1964        { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
1965        { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
1966        {}
1967};
1968
1969static const struct hisi_sas_hw_error fifo_error[] = {
1970        { .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
1971        { .msk = BIT(9),  .msg = "CQE_MSG_FIFO" },
1972        { .msk = BIT(10), .msg = "GETDQE_FIFO" },
1973        { .msk = BIT(11), .msg = "CMDP_FIFO" },
1974        { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
1975        {}
1976};
1977
1978static const struct hisi_sas_hw_error fatal_axi_error[] = {
1979        {
1980                .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
1981                .msg = "write pointer and depth",
1982        },
1983        {
1984                .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
1985                .msg = "iptt no match slot",
1986        },
1987        {
1988                .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
1989                .msg = "read pointer and depth",
1990        },
1991        {
1992                .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
1993                .reg = HGC_AXI_FIFO_ERR_INFO,
1994                .sub = axi_error,
1995        },
1996        {
1997                .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
1998                .reg = HGC_AXI_FIFO_ERR_INFO,
1999                .sub = fifo_error,
2000        },
2001        {
2002                .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
2003                .msg = "LM add/fetch list",
2004        },
2005        {
2006                .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
2007                .msg = "SAS_HGC_ABT fetch LM list",
2008        },
2009        {
2010                .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
2011                .msg = "read dqe poison",
2012        },
2013        {
2014                .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
2015                .msg = "read iost poison",
2016        },
2017        {
2018                .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
2019                .msg = "read itct poison",
2020        },
2021        {
2022                .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
2023                .msg = "read itct ncq poison",
2024        },
2025
2026};
2027
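/*
 * Handler for fatal AXI/ECC interrupts: decode ENT_INT_SRC3, queue a
 * controller reset for each reported error and, on silicon older than
 * revision 0x21, request an AXI master shutdown. ITCT clear completions
 * are also signalled from here.
 */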
2028static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
2029{
2030        u32 irq_value, irq_msk;
2031        struct hisi_hba *hisi_hba = p;
2032        struct device *dev = hisi_hba->dev;
2033        struct pci_dev *pdev = hisi_hba->pci_dev;
2034        int i;
2035
2036        irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
2037        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
2038
2039        irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
2040        irq_value &= ~irq_msk;
2041
2042        for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
2043                const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
2044
2045                if (!(irq_value & error->irq_msk))
2046                        continue;
2047
2048                if (error->sub) {
2049                        const struct hisi_sas_hw_error *sub = error->sub;
2050                        u32 err_value = hisi_sas_read32(hisi_hba, error->reg);
2051
2052                        for (; sub->msk || sub->msg; sub++) {
2053                                if (!(err_value & sub->msk))
2054                                        continue;
2055
2056                                dev_err(dev, "%s error (0x%x) found!\n",
2057                                        sub->msg, irq_value);
2058                                queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2059                        }
2060                } else {
2061                        dev_err(dev, "%s error (0x%x) found!\n",
2062                                error->msg, irq_value);
2063                        queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2064                }
2065
2066                if (pdev->revision < 0x21) {
2067                        u32 reg_val;
2068
2069                        reg_val = hisi_sas_read32(hisi_hba,
2070                                                  AXI_MASTER_CFG_BASE +
2071                                                  AM_CTRL_GLOBAL);
2072                        reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
2073                        hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
2074                                         AM_CTRL_GLOBAL, reg_val);
2075                }
2076        }
2077
2078        fatal_ecc_int_v3_hw(hisi_hba);
2079
2080        if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
2081                u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
2082                u32 dev_id = reg_val & ITCT_DEV_MSK;
2083                struct hisi_sas_device *sas_dev =
2084                                &hisi_hba->devices[dev_id];
2085
2086                hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
2087                dev_dbg(dev, "clear ITCT ok\n");
2088                complete(sas_dev->completion);
2089        }
2090
2091        hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
2092        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
2093
2094        return IRQ_HANDLED;
2095}
2096
2097static void
2098slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
2099               struct hisi_sas_slot *slot)
2100{
2101        struct task_status_struct *ts = &task->task_status;
2102        struct hisi_sas_complete_v3_hdr *complete_queue =
2103                        hisi_hba->complete_hdr[slot->cmplt_queue];
2104        struct hisi_sas_complete_v3_hdr *complete_hdr =
2105                        &complete_queue[slot->cmplt_queue_slot];
2106        struct hisi_sas_err_record_v3 *record =
2107                        hisi_sas_status_buf_addr_mem(slot);
2108        u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
2109        u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
2110        u32 dw3 = le32_to_cpu(complete_hdr->dw3);
2111
2112        switch (task->task_proto) {
2113        case SAS_PROTOCOL_SSP:
2114                if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
2115                        ts->residual = trans_tx_fail_type;
2116                        ts->stat = SAS_DATA_UNDERRUN;
2117                } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
2118                        ts->stat = SAS_QUEUE_FULL;
2119                        slot->abort = 1;
2120                } else {
2121                        ts->stat = SAS_OPEN_REJECT;
2122                        ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2123                }
2124                break;
2125        case SAS_PROTOCOL_SATA:
2126        case SAS_PROTOCOL_STP:
2127        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2128                if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
2129                        ts->residual = trans_tx_fail_type;
2130                        ts->stat = SAS_DATA_UNDERRUN;
2131                } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
2132                        ts->stat = SAS_PHY_DOWN;
2133                        slot->abort = 1;
2134                } else {
2135                        ts->stat = SAS_OPEN_REJECT;
2136                        ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2137                }
2138                hisi_sas_sata_done(task, slot);
2139                break;
2140        case SAS_PROTOCOL_SMP:
2141                ts->stat = SAM_STAT_CHECK_CONDITION;
2142                break;
2143        default:
2144                break;
2145        }
2146}
2147
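/*
 * Translate a completion queue entry into a libsas task status: internal
 * abort results map onto TMF response codes, erroneous completions are
 * decoded by slot_err_v3_hw(), and successful ones are parsed per protocol
 * before the slot is freed and task_done() is called.
 */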
2148static int
2149slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2150{
2151        struct sas_task *task = slot->task;
2152        struct hisi_sas_device *sas_dev;
2153        struct device *dev = hisi_hba->dev;
2154        struct task_status_struct *ts;
2155        struct domain_device *device;
2156        struct sas_ha_struct *ha;
2157        enum exec_status sts;
2158        struct hisi_sas_complete_v3_hdr *complete_queue =
2159                        hisi_hba->complete_hdr[slot->cmplt_queue];
2160        struct hisi_sas_complete_v3_hdr *complete_hdr =
2161                        &complete_queue[slot->cmplt_queue_slot];
2162        unsigned long flags;
2163        bool is_internal = slot->is_internal;
2164        u32 dw0, dw1, dw3;
2165
2166        if (unlikely(!task || !task->lldd_task || !task->dev))
2167                return -EINVAL;
2168
2169        ts = &task->task_status;
2170        device = task->dev;
2171        ha = device->port->ha;
2172        sas_dev = device->lldd_dev;
2173
2174        spin_lock_irqsave(&task->task_state_lock, flags);
2175        task->task_state_flags &=
2176                ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
2177        spin_unlock_irqrestore(&task->task_state_lock, flags);
2178
2179        memset(ts, 0, sizeof(*ts));
2180        ts->resp = SAS_TASK_COMPLETE;
2181
2182        if (unlikely(!sas_dev)) {
2183                dev_dbg(dev, "slot complete: port has no device\n");
2184                ts->stat = SAS_PHY_DOWN;
2185                goto out;
2186        }
2187
2188        dw0 = le32_to_cpu(complete_hdr->dw0);
2189        dw1 = le32_to_cpu(complete_hdr->dw1);
2190        dw3 = le32_to_cpu(complete_hdr->dw3);
2191
2192        /*
2193         * Use SAS+TMF status codes
2194         */
2195        switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
2196        case STAT_IO_ABORTED:
2197                /* this IO has been aborted by abort command */
2198                ts->stat = SAS_ABORTED_TASK;
2199                goto out;
2200        case STAT_IO_COMPLETE:
2201                /* internal abort command complete */
2202                ts->stat = TMF_RESP_FUNC_SUCC;
2203                goto out;
2204        case STAT_IO_NO_DEVICE:
2205                ts->stat = TMF_RESP_FUNC_COMPLETE;
2206                goto out;
2207        case STAT_IO_NOT_VALID:
2208                /*
2209                 * abort single IO, the controller can't find the IO
2210                 */
2211                ts->stat = TMF_RESP_FUNC_FAILED;
2212                goto out;
2213        default:
2214                break;
2215        }
2216
2217        /* check for erroneous completion */
2218        if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
2219                u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
2220
2221                slot_err_v3_hw(hisi_hba, task, slot);
2222                if (ts->stat != SAS_DATA_UNDERRUN)
2223                        dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
2224                                 slot->idx, task, sas_dev->device_id,
2225                                 dw0, dw1, complete_hdr->act, dw3,
2226                                 error_info[0], error_info[1],
2227                                 error_info[2], error_info[3]);
2228                if (unlikely(slot->abort))
2229                        return ts->stat;
2230                goto out;
2231        }
2232
2233        switch (task->task_proto) {
2234        case SAS_PROTOCOL_SSP: {
2235                struct ssp_response_iu *iu =
2236                        hisi_sas_status_buf_addr_mem(slot) +
2237                        sizeof(struct hisi_sas_err_record);
2238
2239                sas_ssp_task_response(dev, task, iu);
2240                break;
2241        }
2242        case SAS_PROTOCOL_SMP: {
2243                struct scatterlist *sg_resp = &task->smp_task.smp_resp;
2244                void *to;
2245
2246                ts->stat = SAM_STAT_GOOD;
2247                to = kmap_atomic(sg_page(sg_resp));
2248
2249                dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
2250                             DMA_FROM_DEVICE);
2251                dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
2252                             DMA_TO_DEVICE);
2253                memcpy(to + sg_resp->offset,
2254                        hisi_sas_status_buf_addr_mem(slot) +
2255                       sizeof(struct hisi_sas_err_record),
2256                       sg_dma_len(sg_resp));
2257                kunmap_atomic(to);
2258                break;
2259        }
2260        case SAS_PROTOCOL_SATA:
2261        case SAS_PROTOCOL_STP:
2262        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2263                ts->stat = SAM_STAT_GOOD;
2264                hisi_sas_sata_done(task, slot);
2265                break;
2266        default:
2267                ts->stat = SAM_STAT_CHECK_CONDITION;
2268                break;
2269        }
2270
2271        if (!slot->port->port_attached) {
2272                dev_warn(dev, "slot complete: port %d has been removed\n",
2273                        slot->port->sas_port.id);
2274                ts->stat = SAS_PHY_DOWN;
2275        }
2276
2277out:
2278        sts = ts->stat;
2279        spin_lock_irqsave(&task->task_state_lock, flags);
2280        if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
2281                spin_unlock_irqrestore(&task->task_state_lock, flags);
2282                dev_info(dev, "slot complete: task(%p) aborted\n", task);
2283                return SAS_ABORTED_TASK;
2284        }
2285        task->task_state_flags |= SAS_TASK_STATE_DONE;
2286        spin_unlock_irqrestore(&task->task_state_lock, flags);
2287        hisi_sas_slot_task_free(hisi_hba, task, slot);
2288
2289        if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
2290                spin_lock_irqsave(&device->done_lock, flags);
2291                if (test_bit(SAS_HA_FROZEN, &ha->state)) {
2292                        spin_unlock_irqrestore(&device->done_lock, flags);
2293                        dev_info(dev, "slot complete: task(%p) ignored\n",
2294                                 task);
2295                        return sts;
2296                }
2297                spin_unlock_irqrestore(&device->done_lock, flags);
2298        }
2299
2300        if (task->task_done)
2301                task->task_done(task);
2302
2303        return sts;
2304}
2305
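/*
 * Completion queue tasklet: consume entries between the cached read pointer
 * and the hardware write pointer, complete the slot referenced by each
 * entry's IPTT, then publish the new read pointer back to the hardware.
 */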
2306static void cq_tasklet_v3_hw(unsigned long val)
2307{
2308        struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
2309        struct hisi_hba *hisi_hba = cq->hisi_hba;
2310        struct hisi_sas_slot *slot;
2311        struct hisi_sas_complete_v3_hdr *complete_queue;
2312        u32 rd_point = cq->rd_point, wr_point;
2313        int queue = cq->id;
2314
2315        complete_queue = hisi_hba->complete_hdr[queue];
2316
2317        wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
2318                                   (0x14 * queue));
2319
2320        while (rd_point != wr_point) {
2321                struct hisi_sas_complete_v3_hdr *complete_hdr;
2322                struct device *dev = hisi_hba->dev;
2323                u32 dw1;
2324                int iptt;
2325
2326                complete_hdr = &complete_queue[rd_point];
2327                dw1 = le32_to_cpu(complete_hdr->dw1);
2328
2329                iptt = dw1 & CMPLT_HDR_IPTT_MSK;
2330                if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
2331                        slot = &hisi_hba->slot_info[iptt];
2332                        slot->cmplt_queue_slot = rd_point;
2333                        slot->cmplt_queue = queue;
2334                        slot_complete_v3_hw(hisi_hba, slot);
2335                } else
2336                        dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);
2337
2338                if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
2339                        rd_point = 0;
2340        }
2341
2342        /* update rd_point */
2343        cq->rd_point = rd_point;
2344        hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
2345}
2346
2347static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
2348{
2349        struct hisi_sas_cq *cq = p;
2350        struct hisi_hba *hisi_hba = cq->hisi_hba;
2351        int queue = cq->id;
2352
2353        hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
2354
2355        tasklet_schedule(&cq->tasklet);
2356
2357        return IRQ_HANDLED;
2358}
2359
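/*
 * Build the CPU -> completion queue reply map from the PCI IRQ affinity
 * masks; fall back to a simple modulo mapping if affinity info is missing.
 */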
2360static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
2361{
2362        const struct cpumask *mask;
2363        int queue, cpu;
2364
2365        for (queue = 0; queue < nvecs; queue++) {
2366                struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
2367
2368                mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
2369                                            BASE_VECTORS_V3_HW);
2370                if (!mask)
2371                        goto fallback;
2372                cq->pci_irq_mask = mask;
2373                for_each_cpu(cpu, mask)
2374                        hisi_hba->reply_map[cpu] = queue;
2375        }
2376        return;
2377
2378fallback:
2379        for_each_possible_cpu(cpu)
2380                hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
2381        /* Don't clean all CQ masks */
2382}
2383
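/*
 * Allocate MSI vectors (optionally with managed affinity), hook up the phy,
 * channel and fatal interrupts on fixed vectors 1, 2 and 11, and request one
 * completion queue interrupt plus tasklet per allocated CQ vector.
 */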
2384static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
2385{
2386        struct device *dev = hisi_hba->dev;
2387        struct pci_dev *pdev = hisi_hba->pci_dev;
2388        int vectors, rc;
2389        int i, k;
2390        int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
2391
2392        if (auto_affine_msi_experimental) {
2393                struct irq_affinity desc = {
2394                        .pre_vectors = BASE_VECTORS_V3_HW,
2395                };
2396
2397                min_msi = MIN_AFFINE_VECTORS_V3_HW;
2398
2399                hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
2400                                                   sizeof(unsigned int),
2401                                                   GFP_KERNEL);
2402                if (!hisi_hba->reply_map)
2403                        return -ENOMEM;
2404                vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
2405                                                         min_msi, max_msi,
2406                                                         PCI_IRQ_MSI |
2407                                                         PCI_IRQ_AFFINITY,
2408                                                         &desc);
2409                if (vectors < 0)
2410                        return -ENOENT;
2411                setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
2412        } else {
2413                min_msi = max_msi;
2414                vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
2415                                                max_msi, PCI_IRQ_MSI);
2416                if (vectors < 0)
2417                        return vectors;
2418        }
2419
2420        hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
2421
2422        rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
2423                              int_phy_up_down_bcast_v3_hw, 0,
2424                              DRV_NAME " phy", hisi_hba);
2425        if (rc) {
2426                dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
2427                rc = -ENOENT;
2428                goto free_irq_vectors;
2429        }
2430
2431        rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
2432                              int_chnl_int_v3_hw, 0,
2433                              DRV_NAME " channel", hisi_hba);
2434        if (rc) {
2435                dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
2436                rc = -ENOENT;
2437                goto free_phy_irq;
2438        }
2439
2440        rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
2441                              fatal_axi_int_v3_hw, 0,
2442                              DRV_NAME " fatal", hisi_hba);
2443        if (rc) {
2444                dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
2445                rc = -ENOENT;
2446                goto free_chnl_interrupt;
2447        }
2448
2449        /* Init tasklets for cq only */
2450        for (i = 0; i < hisi_hba->cq_nvecs; i++) {
2451                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2452                struct tasklet_struct *t = &cq->tasklet;
2453                int nr = hisi_sas_intr_conv ? 16 : 16 + i;
2454                unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
2455
2456                rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
2457                                      cq_interrupt_v3_hw, irqflags,
2458                                      DRV_NAME " cq", cq);
2459                if (rc) {
2460                        dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
2461                                i, rc);
2462                        rc = -ENOENT;
2463                        goto free_cq_irqs;
2464                }
2465
2466                tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
2467        }
2468
2469        return 0;
2470
2471free_cq_irqs:
2472        for (k = 0; k < i; k++) {
2473                struct hisi_sas_cq *cq = &hisi_hba->cq[k];
2474                int nr = hisi_sas_intr_conv ? 16 : 16 + k;
2475
2476                free_irq(pci_irq_vector(pdev, nr), cq);
2477        }
2478        free_irq(pci_irq_vector(pdev, 11), hisi_hba);
2479free_chnl_interrupt:
2480        free_irq(pci_irq_vector(pdev, 2), hisi_hba);
2481free_phy_irq:
2482        free_irq(pci_irq_vector(pdev, 1), hisi_hba);
2483free_irq_vectors:
2484        pci_free_irq_vectors(pdev);
2485        return rc;
2486}
2487
2488static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
2489{
2490        int rc;
2491
2492        rc = hw_init_v3_hw(hisi_hba);
2493        if (rc)
2494                return rc;
2495
2496        rc = interrupt_init_v3_hw(hisi_hba);
2497        if (rc)
2498                return rc;
2499
2500        return 0;
2501}
2502
2503static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
2504                struct sas_phy_linkrates *r)
2505{
2506        enum sas_linkrate max = r->maximum_linkrate;
2507        u32 prog_phy_link_rate = 0x800;
2508
2509        prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
2510        hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
2511                             prog_phy_link_rate);
2512}
2513
2514static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
2515{
2516        struct pci_dev *pdev = hisi_hba->pci_dev;
2517        int i;
2518
2519        synchronize_irq(pci_irq_vector(pdev, 1));
2520        synchronize_irq(pci_irq_vector(pdev, 2));
2521        synchronize_irq(pci_irq_vector(pdev, 11));
2522        for (i = 0; i < hisi_hba->queue_count; i++) {
2523                hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
2524                synchronize_irq(pci_irq_vector(pdev, i + 16));
2525        }
2526
2527        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
2528        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
2529        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
2530        hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
2531
2532        for (i = 0; i < hisi_hba->n_phy; i++) {
2533                hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
2534                hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
2535                hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
2536                hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
2537                hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
2538        }
2539}
2540
2541static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
2542{
2543        return hisi_sas_read32(hisi_hba, PHY_STATE);
2544}
2545
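/*
 * Quiesce the controller: mask all interrupts, stop the delivery queues and
 * phys, then request an AXI master shutdown and poll until the bus is idle.
 */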
2546static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
2547{
2548        struct device *dev = hisi_hba->dev;
2549        u32 status, reg_val;
2550        int rc;
2551
2552        interrupt_disable_v3_hw(hisi_hba);
2553        hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
2554        hisi_sas_kill_tasklets(hisi_hba);
2555
2556        hisi_sas_stop_phys(hisi_hba);
2557
2558        mdelay(10);
2559
2560        reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
2561                                  AM_CTRL_GLOBAL);
2562        reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
2563        hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
2564                         AM_CTRL_GLOBAL, reg_val);
2565
2566        /* wait until bus idle */
2567        rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
2568                                          AM_CURR_TRANS_RETURN, status,
2569                                          status == 0x3, 10, 100);
2570        if (rc) {
2571                dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
2572                return rc;
2573        }
2574
2575        return 0;
2576}
2577
2578static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
2579{
2580        struct device *dev = hisi_hba->dev;
2581        int rc;
2582
2583        rc = disable_host_v3_hw(hisi_hba);
2584        if (rc) {
2585                dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
2586                return rc;
2587        }
2588
2589        hisi_sas_init_mem(hisi_hba);
2590
2591        return hw_init_v3_hw(hisi_hba);
2592}
2593
2594static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
2595                        u8 reg_index, u8 reg_count, u8 *write_data)
2596{
2597        struct device *dev = hisi_hba->dev;
2598        u32 *data = (u32 *)write_data;
2599        int i;
2600
2601        switch (reg_type) {
2602        case SAS_GPIO_REG_TX:
2603                if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
2604                        dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
2605                                reg_index, reg_index + reg_count - 1);
2606                        return -EINVAL;
2607                }
2608
2609                for (i = 0; i < reg_count; i++)
2610                        hisi_sas_write32(hisi_hba,
2611                                         SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
2612                                         data[i]);
2613                break;
2614        default:
2615                dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
2616                        reg_type);
2617                return -EINVAL;
2618        }
2619
2620        return 0;
2621}
2622
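/*
 * Poll CQE_SEND_CNT until it stops changing (i.e. no more completions are
 * being produced) or the timeout expires.
 */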
2623static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
2624                                            int delay_ms, int timeout_ms)
2625{
2626        struct device *dev = hisi_hba->dev;
2627        int entries, entries_old = 0, time;
2628
2629        for (time = 0; time < timeout_ms; time += delay_ms) {
2630                entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
2631                if (entries == entries_old)
2632                        break;
2633
2634                entries_old = entries;
2635                msleep(delay_ms);
2636        }
2637
2638        if (time >= timeout_ms)
2639                return -ETIMEDOUT;
2640
2641        dev_dbg(dev, "wait commands complete %dms\n", time);
2642
2643        return 0;
2644}
2645
2646static ssize_t intr_conv_v3_hw_show(struct device *dev,
2647                                    struct device_attribute *attr, char *buf)
2648{
2649        return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
2650}
2651static DEVICE_ATTR_RO(intr_conv_v3_hw);
2652
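/*
 * The coalescing registers must be programmed while the PHYs are disabled,
 * so stop the PHYs, write the settings and start them again. A zero ticks
 * or count value selects the non-coalesced default.
 */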
2653static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
2654{
2655        /* config those registers between enable and disable PHYs */
2656        hisi_sas_stop_phys(hisi_hba);
2657
2658        if (hisi_hba->intr_coal_ticks == 0 ||
2659            hisi_hba->intr_coal_count == 0) {
2660                hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
2661                hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
2662                hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
2663        } else {
2664                hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
2665                hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
2666                                 hisi_hba->intr_coal_ticks);
2667                hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
2668                                 hisi_hba->intr_coal_count);
2669        }
2670        phys_init_v3_hw(hisi_hba);
2671}
2672
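/*
 * The coalescing parameters are exposed through the host_attrs_v3_hw sysfs
 * attributes below. As a usage sketch (the host number is system dependent,
 * "host0" here is only illustrative):
 *
 *   echo 500 > /sys/class/scsi_host/host0/intr_coal_ticks_v3_hw
 *   echo 10  > /sys/class/scsi_host/host0/intr_coal_count_v3_hw
 *
 * Writing 0 to either attribute makes config_intr_coal_v3_hw() fall back to
 * its non-coalesced default programming.
 */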
2673static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
2674                                          struct device_attribute *attr,
2675                                          char *buf)
2676{
2677        struct Scsi_Host *shost = class_to_shost(dev);
2678        struct hisi_hba *hisi_hba = shost_priv(shost);
2679
2680        return scnprintf(buf, PAGE_SIZE, "%u\n",
2681                         hisi_hba->intr_coal_ticks);
2682}
2683
2684static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
2685                                           struct device_attribute *attr,
2686                                           const char *buf, size_t count)
2687{
2688        struct Scsi_Host *shost = class_to_shost(dev);
2689        struct hisi_hba *hisi_hba = shost_priv(shost);
2690        u32 intr_coal_ticks;
2691        int ret;
2692
2693        ret = kstrtou32(buf, 10, &intr_coal_ticks);
2694        if (ret) {
2695                dev_err(dev, "Invalid interrupt coalescing ticks value\n");
2696                return -EINVAL;
2697        }
2698
2699        if (intr_coal_ticks >= BIT(24)) {
2700                dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
2701                return -EINVAL;
2702        }
2703
2704        hisi_hba->intr_coal_ticks = intr_coal_ticks;
2705
2706        config_intr_coal_v3_hw(hisi_hba);
2707
2708        return count;
2709}
2710static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);
2711
2712static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
2713                                          struct device_attribute *attr,
2714                                          char *buf)
2715{
2716        struct Scsi_Host *shost = class_to_shost(dev);
2717        struct hisi_hba *hisi_hba = shost_priv(shost);
2718
2719        return scnprintf(buf, PAGE_SIZE, "%u\n",
2720                         hisi_hba->intr_coal_count);
2721}
2722
2723static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
2724                                           struct device_attribute *attr,
2725                                           const char *buf, size_t count)
2726{
2727        struct Scsi_Host *shost = class_to_shost(dev);
2728        struct hisi_hba *hisi_hba = shost_priv(shost);
2729        u32 intr_coal_count;
2730        int ret;
2731
2732        ret = kstrtou32(buf, 10, &intr_coal_count);
2733        if (ret) {
2734                dev_err(dev, "Invalid interrupt coalescing count value\n");
2735                return -EINVAL;
2736        }
2737
2738        if (intr_coal_count >= BIT(8)) {
2739                dev_err(dev, "intr_coal_count must be less than 2^8!\n");
2740                return -EINVAL;
2741        }
2742
2743        hisi_hba->intr_coal_count = intr_coal_count;
2744
2745        config_intr_coal_v3_hw(hisi_hba);
2746
2747        return count;
2748}
2749static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
2750
2751static struct device_attribute *host_attrs_v3_hw[] = {
2752        &dev_attr_phy_event_threshold,
2753        &dev_attr_intr_conv_v3_hw,
2754        &dev_attr_intr_coal_ticks_v3_hw,
2755        &dev_attr_intr_coal_count_v3_hw,
2756        NULL
2757};
2758
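/*
 * Name/offset lookup tables describing the per-port and global register
 * ranges dumped by the common hisi_sas debugfs snapshot code.
 */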
2759static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
2760        HISI_SAS_DEBUGFS_REG(PHY_CFG),
2761        HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
2762        HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
2763        HISI_SAS_DEBUGFS_REG(PHY_CTRL),
2764        HISI_SAS_DEBUGFS_REG(SL_CFG),
2765        HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
2766        HISI_SAS_DEBUGFS_REG(SL_CONTROL),
2767        HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
2768        HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
2769        HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
2770        HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
2771        HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
2772        HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
2773        HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
2774        HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
2775        HISI_SAS_DEBUGFS_REG(TXID_AUTO),
2776        HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
2777        HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
2778        HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
2779        HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
2780        HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
2781        HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
2782        HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
2783        HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
2784        HISI_SAS_DEBUGFS_REG(CHL_INT0),
2785        HISI_SAS_DEBUGFS_REG(CHL_INT1),
2786        HISI_SAS_DEBUGFS_REG(CHL_INT2),
2787        HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
2788        HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
2789        HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
2790        HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
2791        HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
2792        HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
2793        HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
2794        HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
2795        HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
2796        HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
2797        HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
2798        HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
2799        HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
2800        HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
2801        HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
2802        HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
2803        HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
2804        HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
2805        HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
2806        HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
2807        {}
2808};
2809
2810static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
2811        .lu = debugfs_port_reg_lu,
2812        .count = 0x100,
2813        .base_off = PORT_BASE,
2814        .read_port_reg = hisi_sas_phy_read32,
2815};
2816
2817static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
2818        HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
2819        HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
2820        HISI_SAS_DEBUGFS_REG(PHY_STATE),
2821        HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
2822        HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
2823        HISI_SAS_DEBUGFS_REG(ITCT_CLR),
2824        HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
2825        HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
2826        HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
2827        HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
2828        HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
2829        HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
2830        HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
2831        HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
2832        HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
2833        HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
2834        HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
2835        HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
2836        HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
2837        HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
2838        HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
2839        HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
2840        HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
2841        HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
2842        HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
2843        HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
2844        HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
2845        HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
2846        HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
2847        HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
2848        HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
2849        HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
2850        HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
2851        HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
2852        HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
2853        HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
2854        HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
2855        HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
2856        HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
2857        HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
2858        HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
2859        HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
2860        HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
2861        HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
2862        HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
2863        HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
2864        HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
2865        HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
2866        HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
2867        HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
2868        HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
2869        HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
2870        HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
2871        HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
2872        HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
2873        HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
2874        HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
2875        HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
2876        HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
2877        HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
2878        HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
2879        HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
2880        HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
2881        {}
2882};
2883
2884static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
2885        .lu = debugfs_global_reg_lu,
2886        .count = 0x800,
2887        .read_global_reg = hisi_sas_read32,
2888};
2889
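/*
 * Before taking a debugfs snapshot: reject new commands, stop the delivery
 * queues, give in-flight commands up to 5s to complete and silence the CQ
 * tasklets; debugfs_snapshot_restore_v3_hw() reverses this afterwards.
 */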
2890static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
2891{
2892        struct device *dev = hisi_hba->dev;
2893
2894        set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
2895
2896        hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
2897
2898        if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT)
2899                dev_dbg(dev, "Wait commands complete timeout!\n");
2900
2901        hisi_sas_kill_tasklets(hisi_hba);
2902}
2903
2904static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
2905{
2906        hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
2907                         (u32)((1ULL << hisi_hba->queue_count) - 1));
2908
2909        clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
2910}
2911
2912static struct scsi_host_template sht_v3_hw = {
2913        .name                   = DRV_NAME,
2914        .module                 = THIS_MODULE,
2915        .queuecommand           = sas_queuecommand,
2916        .target_alloc           = sas_target_alloc,
2917        .slave_configure        = hisi_sas_slave_configure,
2918        .scan_finished          = hisi_sas_scan_finished,
2919        .scan_start             = hisi_sas_scan_start,
2920        .change_queue_depth     = sas_change_queue_depth,
2921        .bios_param             = sas_bios_param,
2922        .this_id                = -1,
2923        .sg_tablesize           = HISI_SAS_SGE_PAGE_CNT,
2924        .sg_prot_tablesize      = HISI_SAS_SGE_PAGE_CNT,
2925        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
2926        .eh_device_reset_handler = sas_eh_device_reset_handler,
2927        .eh_target_reset_handler = sas_eh_target_reset_handler,
2928        .target_destroy         = sas_target_destroy,
2929        .ioctl                  = sas_ioctl,
2930        .shost_attrs            = host_attrs_v3_hw,
2931        .tag_alloc_policy       = BLK_TAG_ALLOC_RR,
2932        .host_reset             = hisi_sas_host_reset,
2933};
2934
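/* Hardware-specific ops handed to the common hisi_sas layer for v3 hw */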
2935static const struct hisi_sas_hw hisi_sas_v3_hw = {
2936        .hw_init = hisi_sas_v3_init,
2937        .setup_itct = setup_itct_v3_hw,
2938        .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
2939        .get_wideport_bitmap = get_wideport_bitmap_v3_hw,
2940        .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
2941        .clear_itct = clear_itct_v3_hw,
2942        .sl_notify_ssp = sl_notify_ssp_v3_hw,
2943        .prep_ssp = prep_ssp_v3_hw,
2944        .prep_smp = prep_smp_v3_hw,
2945        .prep_stp = prep_ata_v3_hw,
2946        .prep_abort = prep_abort_v3_hw,
2947        .get_free_slot = get_free_slot_v3_hw,
2948        .start_delivery = start_delivery_v3_hw,
2949        .slot_complete = slot_complete_v3_hw,
2950        .phys_init = phys_init_v3_hw,
2951        .phy_start = start_phy_v3_hw,
2952        .phy_disable = disable_phy_v3_hw,
2953        .phy_hard_reset = phy_hard_reset_v3_hw,
2954        .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
2955        .phy_set_linkrate = phy_set_linkrate_v3_hw,
2956        .dereg_device = dereg_device_v3_hw,
2957        .soft_reset = soft_reset_v3_hw,
2958        .get_phys_state = get_phys_state_v3_hw,
2959        .get_events = phy_get_events_v3_hw,
2960        .write_gpio = write_gpio_v3_hw,
2961        .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
2962        .debugfs_reg_global = &debugfs_global_reg,
2963        .debugfs_reg_port = &debugfs_port_reg,
2964        .snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
2965        .snapshot_restore = debugfs_snapshot_restore_v3_hw,
2966};
2967
2968static struct Scsi_Host *
2969hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
2970{
2971        struct Scsi_Host *shost;
2972        struct hisi_hba *hisi_hba;
2973        struct device *dev = &pdev->dev;
2974
2975        shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
2976        if (!shost) {
2977                dev_err(dev, "shost alloc failed\n");
2978                return NULL;
2979        }
2980        hisi_hba = shost_priv(shost);
2981
2982        INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2983        INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
2984        hisi_hba->hw = &hisi_sas_v3_hw;
2985        hisi_hba->pci_dev = pdev;
2986        hisi_hba->dev = dev;
2987        hisi_hba->shost = shost;
2988        SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2989
2990        if (prot_mask & ~HISI_SAS_PROT_MASK)
2991                dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
2992                        prot_mask);
2993        else
2994                hisi_hba->prot_mask = prot_mask;
2995
2996        timer_setup(&hisi_hba->timer, NULL, 0);
2997
2998        if (hisi_sas_get_fw_info(hisi_hba) < 0)
2999                goto err_out;
3000
3001        if (hisi_sas_alloc(hisi_hba)) {
3002                hisi_sas_free(hisi_hba);
3003                goto err_out;
3004        }
3005
3006        return shost;
3007err_out:
3008        scsi_host_put(shost);
3009        dev_err(dev, "shost init failed\n");
3010        return NULL;
3011}
3012
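/*
 * PCI probe: enable the device, set up DMA masking, map the BAR 5 register
 * space, wire up the Scsi_Host/sas_ha structures, register with the SCSI
 * midlayer and libsas, then run the hardware init before scanning.
 */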
3013static int
3014hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3015{
3016        struct Scsi_Host *shost;
3017        struct hisi_hba *hisi_hba;
3018        struct device *dev = &pdev->dev;
3019        struct asd_sas_phy **arr_phy;
3020        struct asd_sas_port **arr_port;
3021        struct sas_ha_struct *sha;
3022        int rc, phy_nr, port_nr, i;
3023
3024        rc = pci_enable_device(pdev);
3025        if (rc)
3026                goto err_out;
3027
3028        pci_set_master(pdev);
3029
3030        rc = pci_request_regions(pdev, DRV_NAME);
3031        if (rc)
3032                goto err_out_disable_device;
3033
3034        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3035        if (rc)
3036                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3037        if (rc) {
3038                dev_err(dev, "No usable DMA addressing method\n");
3039                rc = -ENODEV;
3040                goto err_out_regions;
3041        }
3042
3043        shost = hisi_sas_shost_alloc_pci(pdev);
3044        if (!shost) {
3045                rc = -ENOMEM;
3046                goto err_out_regions;
3047        }
3048
3049        sha = SHOST_TO_SAS_HA(shost);
3050        hisi_hba = shost_priv(shost);
3051        dev_set_drvdata(dev, sha);
3052
3053        hisi_hba->regs = pcim_iomap(pdev, 5, 0);
3054        if (!hisi_hba->regs) {
3055                dev_err(dev, "cannot map register\n");
3056                rc = -ENOMEM;
3057                goto err_out_ha;
3058        }
3059
3060        phy_nr = port_nr = hisi_hba->n_phy;
3061
3062        arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
3063        arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
3064        if (!arr_phy || !arr_port) {
3065                rc = -ENOMEM;
3066                goto err_out_ha;
3067        }
3068
3069        sha->sas_phy = arr_phy;
3070        sha->sas_port = arr_port;
3071        sha->core.shost = shost;
3072        sha->lldd_ha = hisi_hba;
3073
3074        shost->transportt = hisi_sas_stt;
3075        shost->max_id = HISI_SAS_MAX_DEVICES;
3076        shost->max_lun = ~0;
3077        shost->max_channel = 1;
3078        shost->max_cmd_len = 16;
3079        shost->can_queue = hisi_hba->hw->max_command_entries -
3080                HISI_SAS_RESERVED_IPTT_CNT;
3081        shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
3082                HISI_SAS_RESERVED_IPTT_CNT;
3083
3084        sha->sas_ha_name = DRV_NAME;
3085        sha->dev = dev;
3086        sha->lldd_module = THIS_MODULE;
3087        sha->sas_addr = &hisi_hba->sas_addr[0];
3088        sha->num_phys = hisi_hba->n_phy;
3089        sha->core.shost = hisi_hba->shost;
3090
3091        for (i = 0; i < hisi_hba->n_phy; i++) {
3092                sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
3093                sha->sas_port[i] = &hisi_hba->port[i].sas_port;
3094        }
3095
3096        if (hisi_hba->prot_mask) {
3097                dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
3098                         prot_mask);
3099                scsi_host_set_prot(hisi_hba->shost, prot_mask);
3100                if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
3101                        scsi_host_set_guard(hisi_hba->shost,
3102                                            SHOST_DIX_GUARD_CRC);
3103        }
3104
3105        if (hisi_sas_debugfs_enable)
3106                hisi_sas_debugfs_init(hisi_hba);
3107
3108        rc = scsi_add_host(shost, dev);
3109        if (rc)
3110                goto err_out_ha;
3111
3112        rc = sas_register_ha(sha);
3113        if (rc)
3114                goto err_out_register_ha;
3115
3116        rc = hisi_hba->hw->hw_init(hisi_hba);
3117        if (rc)
3118                goto err_out_register_ha;
3119
3120        scsi_scan_host(shost);
3121
3122        return 0;
3123
3124err_out_register_ha:
3125        scsi_remove_host(shost);
3126err_out_ha:
3127        scsi_host_put(shost);
3128err_out_regions:
3129        pci_release_regions(pdev);
3130err_out_disable_device:
3131        pci_disable_device(pdev);
3132err_out:
3133        return rc;
3134}
3135
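/*
 * Free the three non-CQ vectors requested at init time (1, 2 and 11) and
 * every completion-queue vector. CQ vectors start at 16; with interrupt
 * convergence enabled all CQs share vector 16.
 */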
3136static void
3137hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
3138{
3139        int i;
3140
3141        free_irq(pci_irq_vector(pdev, 1), hisi_hba);
3142        free_irq(pci_irq_vector(pdev, 2), hisi_hba);
3143        free_irq(pci_irq_vector(pdev, 11), hisi_hba);
3144        for (i = 0; i < hisi_hba->cq_nvecs; i++) {
3145                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
3146                int nr = hisi_sas_intr_conv ? 16 : 16 + i;
3147
3148                free_irq(pci_irq_vector(pdev, nr), cq);
3149        }
3150        pci_free_irq_vectors(pdev);
3151}
3152
3153static void hisi_sas_v3_remove(struct pci_dev *pdev)
3154{
3155        struct device *dev = &pdev->dev;
3156        struct sas_ha_struct *sha = dev_get_drvdata(dev);
3157        struct hisi_hba *hisi_hba = sha->lldd_ha;
3158        struct Scsi_Host *shost = sha->core.shost;
3159
3160        hisi_sas_debugfs_exit(hisi_hba);
3161
3162        if (timer_pending(&hisi_hba->timer))
3163                del_timer(&hisi_hba->timer);
3164
3165        sas_unregister_ha(sha);
3166        sas_remove_host(sha->core.shost);
3167
3168        hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
3169        hisi_sas_kill_tasklets(hisi_hba);
3170        pci_release_regions(pdev);
3171        pci_disable_device(pdev);
3172        hisi_sas_free(hisi_hba);
3173        scsi_host_put(shost);
3174}
3175
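/*
 * PCI reset (e.g. FLR) hooks: .reset_prepare quiesces libsas and disables
 * the host, .reset_done re-initialises memory and hardware and completes
 * the controller reset.
 */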
3176static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
3177{
3178        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3179        struct hisi_hba *hisi_hba = sha->lldd_ha;
3180        struct device *dev = hisi_hba->dev;
3181        int rc;
3182
3183        dev_info(dev, "FLR prepare\n");
3184        set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
3185        hisi_sas_controller_reset_prepare(hisi_hba);
3186
3187        rc = disable_host_v3_hw(hisi_hba);
3188        if (rc)
3189                dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
3190}
3191
3192static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
3193{
3194        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3195        struct hisi_hba *hisi_hba = sha->lldd_ha;
3196        struct device *dev = hisi_hba->dev;
3197        int rc;
3198
3199        hisi_sas_init_mem(hisi_hba);
3200
3201        rc = hw_init_v3_hw(hisi_hba);
3202        if (rc) {
3203                dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
3204                return;
3205        }
3206
3207        hisi_sas_controller_reset_done(hisi_hba);
3208        dev_info(dev, "FLR done\n");
3209}
3210
3211enum {
3212        /* instances of the controller */
3213        hip08,
3214};
3215
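/*
 * Legacy PCI PM callbacks: suspend blocks new requests, drains the work
 * queue, disables the host and drops to the chosen D-state; resume restores
 * PCI state, re-runs init_reg_v3_hw()/phys_init() and hands back to libsas.
 */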
3216static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
3217{
3218        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3219        struct hisi_hba *hisi_hba = sha->lldd_ha;
3220        struct device *dev = hisi_hba->dev;
3221        struct Scsi_Host *shost = hisi_hba->shost;
3222        pci_power_t device_state;
3223        int rc;
3224
3225        if (!pdev->pm_cap) {
3226                dev_err(dev, "PCI PM not supported\n");
3227                return -ENODEV;
3228        }
3229
3230        if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
3231                return -1;
3232
3233        scsi_block_requests(shost);
3234        set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
3235        flush_workqueue(hisi_hba->wq);
3236
3237        rc = disable_host_v3_hw(hisi_hba);
3238        if (rc) {
3239                dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
3240                clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
3241                clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
3242                scsi_unblock_requests(shost);
3243                return rc;
3244        }
3245
3246        hisi_sas_init_mem(hisi_hba);
3247
3248        device_state = pci_choose_state(pdev, state);
3249        dev_warn(dev, "entering operating state [D%d]\n",
3250                 device_state);
3251        pci_save_state(pdev);
3252        pci_disable_device(pdev);
3253        pci_set_power_state(pdev, device_state);
3254
3255        hisi_sas_release_tasks(hisi_hba);
3256
3257        sas_suspend_ha(sha);
3258        return 0;
3259}
3260
3261static int hisi_sas_v3_resume(struct pci_dev *pdev)
3262{
3263        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3264        struct hisi_hba *hisi_hba = sha->lldd_ha;
3265        struct Scsi_Host *shost = hisi_hba->shost;
3266        struct device *dev = hisi_hba->dev;
3267        int rc;
3268        pci_power_t device_state = pdev->current_state;
3269
3270        dev_warn(dev, "resuming from operating state [D%d]\n",
3271                 device_state);
3272        pci_set_power_state(pdev, PCI_D0);
3273        pci_enable_wake(pdev, PCI_D0, 0);
3274        pci_restore_state(pdev);
3275        rc = pci_enable_device(pdev);
3276        if (rc) {
3277                dev_err(dev, "enable device failed during resume (%d)\n", rc);
                return rc;
        }
3278
3279        pci_set_master(pdev);
3280        scsi_unblock_requests(shost);
3281        clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
3282
3283        sas_prep_resume_ha(sha);
3284        init_reg_v3_hw(hisi_hba);
3285        hisi_hba->hw->phys_init(hisi_hba);
3286        sas_resume_ha(sha);
3287        clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
3288
3289        return 0;
3290}
3291
3292static const struct pci_device_id sas_v3_pci_table[] = {
3293        { PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
3294        {}
3295};
3296MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);
3297
3298static const struct pci_error_handlers hisi_sas_err_handler = {
3299        .reset_prepare  = hisi_sas_reset_prepare_v3_hw,
3300        .reset_done     = hisi_sas_reset_done_v3_hw,
3301};
3302
3303static struct pci_driver sas_v3_pci_driver = {
3304        .name           = DRV_NAME,
3305        .id_table       = sas_v3_pci_table,
3306        .probe          = hisi_sas_v3_probe,
3307        .remove         = hisi_sas_v3_remove,
3308        .suspend        = hisi_sas_v3_suspend,
3309        .resume         = hisi_sas_v3_resume,
3310        .err_handler    = &hisi_sas_err_handler,
3311};
3312
3313module_pci_driver(sas_v3_pci_driver);
3314module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);
3315
3316MODULE_LICENSE("GPL");
3317MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
3318MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
3319MODULE_ALIAS("pci:" DRV_NAME);
3320