linux/drivers/usb/gadget/udc/tegra-xudc.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra XUSB device mode controller
 *
 * Copyright (c) 2013-2019, NVIDIA CORPORATION.  All rights reserved.
 * Copyright (c) 2015, Google Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
#include <linux/usb/phy.h>
#include <linux/workqueue.h>

/* XUSB_DEV registers */
#define SPARAM 0x000
#define  SPARAM_ERSTMAX_MASK GENMASK(20, 16)
#define  SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
#define DB 0x004
#define  DB_TARGET_MASK GENMASK(15, 8)
#define  DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
#define  DB_STREAMID_MASK GENMASK(31, 16)
#define  DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
#define ERSTSZ 0x008
#define  ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
#define  ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
#define ERSTXBALO(x) (0x010 + 8 * (x))
#define ERSTXBAHI(x) (0x014 + 8 * (x))
#define ERDPLO 0x020
#define  ERDPLO_EHB BIT(3)
#define ERDPHI 0x024
#define EREPLO 0x028
#define  EREPLO_ECS BIT(0)
#define  EREPLO_SEGI BIT(1)
#define EREPHI 0x02c
#define CTRL 0x030
#define  CTRL_RUN BIT(0)
#define  CTRL_LSE BIT(1)
#define  CTRL_IE BIT(4)
#define  CTRL_SMI_EVT BIT(5)
#define  CTRL_SMI_DSE BIT(6)
#define  CTRL_EWE BIT(7)
#define  CTRL_DEVADDR_MASK GENMASK(30, 24)
#define  CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
#define  CTRL_ENABLE BIT(31)
#define ST 0x034
#define  ST_RC BIT(0)
#define  ST_IP BIT(4)
#define RT_IMOD 0x038
#define  RT_IMOD_IMODI_MASK GENMASK(15, 0)
#define  RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
#define  RT_IMOD_IMODC_MASK GENMASK(31, 16)
#define  RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
#define PORTSC 0x03c
#define  PORTSC_CCS BIT(0)
#define  PORTSC_PED BIT(1)
#define  PORTSC_PR BIT(4)
#define  PORTSC_PLS_SHIFT 5
#define  PORTSC_PLS_MASK GENMASK(8, 5)
#define  PORTSC_PLS_U0 0x0
#define  PORTSC_PLS_U2 0x2
#define  PORTSC_PLS_U3 0x3
#define  PORTSC_PLS_DISABLED 0x4
#define  PORTSC_PLS_RXDETECT 0x5
#define  PORTSC_PLS_INACTIVE 0x6
#define  PORTSC_PLS_RESUME 0xf
#define  PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
#define  PORTSC_PS_SHIFT 10
#define  PORTSC_PS_MASK GENMASK(13, 10)
#define  PORTSC_PS_UNDEFINED 0x0
#define  PORTSC_PS_FS 0x1
#define  PORTSC_PS_LS 0x2
#define  PORTSC_PS_HS 0x3
#define  PORTSC_PS_SS 0x4
#define  PORTSC_LWS BIT(16)
#define  PORTSC_CSC BIT(17)
#define  PORTSC_WRC BIT(19)
#define  PORTSC_PRC BIT(21)
#define  PORTSC_PLC BIT(22)
#define  PORTSC_CEC BIT(23)
#define  PORTSC_WPR BIT(30)
#define  PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
                             PORTSC_PLC | PORTSC_CEC)
#define ECPLO 0x040
#define ECPHI 0x044
#define MFINDEX 0x048
#define  MFINDEX_FRAME_SHIFT 3
#define  MFINDEX_FRAME_MASK GENMASK(13, 3)
#define PORTPM 0x04c
#define  PORTPM_L1S_MASK GENMASK(1, 0)
#define  PORTPM_L1S_DROP 0x0
#define  PORTPM_L1S_ACCEPT 0x1
#define  PORTPM_L1S_NYET 0x2
#define  PORTPM_L1S_STALL 0x3
#define  PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
#define  PORTPM_RWE BIT(3)
#define  PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
#define  PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
#define  PORTPM_FLA BIT(24)
#define  PORTPM_VBA BIT(25)
#define  PORTPM_WOC BIT(26)
#define  PORTPM_WOD BIT(27)
#define  PORTPM_U1E BIT(28)
#define  PORTPM_U2E BIT(29)
#define  PORTPM_FRWE BIT(30)
#define  PORTPM_PNG_CYA BIT(31)
#define EP_HALT 0x050
#define EP_PAUSE 0x054
#define EP_RELOAD 0x058
#define EP_STCHG 0x05c
#define DEVNOTIF_LO 0x064
#define  DEVNOTIF_LO_TRIG BIT(0)
#define  DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
#define  DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
#define  DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
#define DEVNOTIF_HI 0x068
#define PORTHALT 0x06c
#define  PORTHALT_HALT_LTSSM BIT(0)
#define  PORTHALT_HALT_REJECT BIT(1)
#define  PORTHALT_STCHG_REQ BIT(20)
#define  PORTHALT_STCHG_INTR_EN BIT(24)
#define PORT_TM 0x070
#define EP_THREAD_ACTIVE 0x074
#define EP_STOPPED 0x078
#define HSFSPI_COUNT0 0x100
#define HSFSPI_COUNT13 0x134
#define  HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
#define  HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
                                HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
#define BLCG 0x840
#define  BLCG_DFPCI BIT(0)
#define  BLCG_UFPCI BIT(1)
#define  BLCG_FE BIT(2)
#define  BLCG_COREPLL_PWRDN BIT(8)
#define  BLCG_IOPLL_0_PWRDN BIT(9)
#define  BLCG_IOPLL_1_PWRDN BIT(10)
#define  BLCG_IOPLL_2_PWRDN BIT(11)
#define  BLCG_ALL 0x1ff
#define SSPX_CORE_CNT0 0x610
#define  SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
#define  SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
#define SSPX_CORE_CNT30 0x688
#define  SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
                                        SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
#define SSPX_CORE_CNT32 0x690
#define  SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define  SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
                                        SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
#define SSPX_CORE_CNT56 0x6fc
#define  SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
                                SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
#define SSPX_CORE_CNT57 0x700
#define  SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
                                SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
#define SSPX_CORE_CNT65 0x720
#define  SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
                                SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
#define SSPX_CORE_CNT66 0x724
#define  SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
                                SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
#define SSPX_CORE_CNT67 0x728
#define  SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
                                SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
#define SSPX_CORE_CNT72 0x73c
#define  SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
#define  SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
                                SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define  SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define  SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
                                SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
#define CFG_DEV_SSPI_XFER 0x858
#define  CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
#define  CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
                                        CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
#define CFG_DEV_FE 0x85c
#define  CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
#define  CFG_DEV_FE_PORTREGSEL_SS_PI 1
#define  CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
#define  CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
#define  CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)

/* FPCI registers */
#define XUSB_DEV_CFG_1 0x004
#define  XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
#define  XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
#define  XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
#define XUSB_DEV_CFG_4 0x010
#define  XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
#define XUSB_DEV_CFG_5 0x014

/* IPFS registers */
#define XUSB_DEV_CONFIGURATION_0 0x180
#define  XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
#define XUSB_DEV_INTR_MASK_0 0x188
#define  XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)

struct tegra_xudc_ep_context {
        __le32 info0;
        __le32 info1;
        __le32 deq_lo;
        __le32 deq_hi;
        __le32 tx_info;
        __le32 rsvd[11];
};

#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4

#define EP_TYPE_INVALID 0
#define EP_TYPE_ISOCH_OUT 1
#define EP_TYPE_BULK_OUT 2
#define EP_TYPE_INTERRUPT_OUT 3
#define EP_TYPE_CONTROL 4
#define EP_TYPE_ISOCH_IN 5
#define EP_TYPE_BULK_IN 6
#define EP_TYPE_INTERRUPT_IN 7

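/*
 * Generate little-endian read/write accessors for a field of the endpoint
 * context. For example, BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7) expands
 * to ep_ctx_read_state()/ep_ctx_write_state(), which operate on bits [2:0]
 * of info0.
 */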
#define BUILD_EP_CONTEXT_RW(name, member, shift, mask)                  \
static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
{                                                                       \
        return (le32_to_cpu(ctx->member) >> (shift)) & (mask);          \
}                                                                       \
static inline void                                                      \
ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val)         \
{                                                                       \
        u32 tmp;                                                        \
                                                                        \
        tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift));          \
        tmp |= (val & (mask)) << (shift);                               \
        ctx->member = cpu_to_le32(tmp);                                 \
}

BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)

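/*
 * The dequeue pointer is 16-byte aligned: deq_lo holds bits [31:4] of the
 * low word, so the helpers below shift by 4 when converting to and from a
 * 64-bit DMA address.
 */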
static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
{
        return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
                (ep_ctx_read_deq_lo(ctx) << 4);
}

static inline void
ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
{
        ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
        ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
}

struct tegra_xudc_trb {
        __le32 data_lo;
        __le32 data_hi;
        __le32 status;
        __le32 control;
};

#define TRB_TYPE_RSVD 0
#define TRB_TYPE_NORMAL 1
#define TRB_TYPE_SETUP_STAGE 2
#define TRB_TYPE_DATA_STAGE 3
#define TRB_TYPE_STATUS_STAGE 4
#define TRB_TYPE_ISOCH 5
#define TRB_TYPE_LINK 6
#define TRB_TYPE_TRANSFER_EVENT 32
#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
#define TRB_TYPE_STREAM 48
#define TRB_TYPE_SETUP_PACKET_EVENT 63

#define TRB_CMPL_CODE_INVALID 0
#define TRB_CMPL_CODE_SUCCESS 1
#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
#define TRB_CMPL_CODE_USB_TRANS_ERR 4
#define TRB_CMPL_CODE_TRB_ERR 5
#define TRB_CMPL_CODE_STALL 6
#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
#define TRB_CMPL_CODE_SHORT_PACKET 13
#define TRB_CMPL_CODE_RING_UNDERRUN 14
#define TRB_CMPL_CODE_RING_OVERRUN 15
#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
#define TRB_CMPL_CODE_STOPPED 26
#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
#define TRB_CMPL_CODE_HOST_REJECTED 221
#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223

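/*
 * As above, generate accessors for the TRB fields. Several control-word
 * fields (stream_id, endpoint_id, tlbpc, data_stage_dir) overlap at bit 16;
 * which one applies depends on the TRB type.
 */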
#define BUILD_TRB_RW(name, member, shift, mask)                         \
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb)           \
{                                                                       \
        return (le32_to_cpu(trb->member) >> (shift)) & (mask);          \
}                                                                       \
static inline void                                                      \
trb_write_##name(struct tegra_xudc_trb *trb, u32 val)                   \
{                                                                       \
        u32 tmp;                                                        \
                                                                        \
        tmp = le32_to_cpu(trb->member) & ~((mask) << (shift));          \
        tmp |= (val & (mask)) << (shift);                               \
        trb->member = cpu_to_le32(tmp);                                 \
}

BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
BUILD_TRB_RW(seq_num, status, 0, 0xffff)
BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
BUILD_TRB_RW(td_size, status, 17, 0x1f)
BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
BUILD_TRB_RW(cycle, control, 0, 0x1)
BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
BUILD_TRB_RW(isp, control, 2, 0x1)
BUILD_TRB_RW(chain, control, 4, 0x1)
BUILD_TRB_RW(ioc, control, 5, 0x1)
BUILD_TRB_RW(type, control, 10, 0x3f)
BUILD_TRB_RW(stream_id, control, 16, 0xffff)
BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
BUILD_TRB_RW(tlbpc, control, 16, 0xf)
BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
BUILD_TRB_RW(sia, control, 31, 0x1)

static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
{
        return ((u64)trb_read_data_hi(trb) << 32) |
                trb_read_data_lo(trb);
}

static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
{
        trb_write_data_lo(trb, lower_32_bits(addr));
        trb_write_data_hi(trb, upper_32_bits(addr));
}

struct tegra_xudc_request {
        struct usb_request usb_req;

        size_t buf_queued;
        unsigned int trbs_queued;
        unsigned int trbs_needed;
        bool need_zlp;

        struct tegra_xudc_trb *first_trb;
        struct tegra_xudc_trb *last_trb;

        struct list_head list;
};

struct tegra_xudc_ep {
        struct tegra_xudc *xudc;
        struct usb_ep usb_ep;
        unsigned int index;
        char name[8];

        struct tegra_xudc_ep_context *context;

#define XUDC_TRANSFER_RING_SIZE 64
        struct tegra_xudc_trb *transfer_ring;
        dma_addr_t transfer_ring_phys;

        unsigned int enq_ptr;
        unsigned int deq_ptr;
        bool pcs;
        bool ring_full;
        bool stream_rejected;

        struct list_head queue;
        const struct usb_endpoint_descriptor *desc;
        const struct usb_ss_ep_comp_descriptor *comp_desc;
};

struct tegra_xudc_sel_timing {
        __u8 u1sel;
        __u8 u1pel;
        __le16 u2sel;
        __le16 u2pel;
};

enum tegra_xudc_setup_state {
        WAIT_FOR_SETUP,
        DATA_STAGE_XFER,
        DATA_STAGE_RECV,
        STATUS_STAGE_XFER,
        STATUS_STAGE_RECV,
};

struct tegra_xudc_setup_packet {
        struct usb_ctrlrequest ctrl_req;
        unsigned int seq_num;
};

struct tegra_xudc_save_regs {
        u32 ctrl;
        u32 portpm;
};

struct tegra_xudc {
        struct device *dev;
        const struct tegra_xudc_soc *soc;
        struct tegra_xusb_padctl *padctl;

        spinlock_t lock;

        struct usb_gadget gadget;
        struct usb_gadget_driver *driver;

#define XUDC_NR_EVENT_RINGS 2
#define XUDC_EVENT_RING_SIZE 4096
        struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
        dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
        unsigned int event_ring_index;
        unsigned int event_ring_deq_ptr;
        bool ccs;

#define XUDC_NR_EPS 32
        struct tegra_xudc_ep ep[XUDC_NR_EPS];
        struct tegra_xudc_ep_context *ep_context;
        dma_addr_t ep_context_phys;

        struct device *genpd_dev_device;
        struct device *genpd_dev_ss;
        struct device_link *genpd_dl_device;
        struct device_link *genpd_dl_ss;

        struct dma_pool *transfer_ring_pool;

        bool queued_setup_packet;
        struct tegra_xudc_setup_packet setup_packet;
        enum tegra_xudc_setup_state setup_state;
        u16 setup_seq_num;

        u16 dev_addr;
        u16 isoch_delay;
        struct tegra_xudc_sel_timing sel_timing;
        u8 test_mode_pattern;
        u16 status_buf;
        struct tegra_xudc_request *ep0_req;

        bool pullup;

        unsigned int nr_enabled_eps;
        unsigned int nr_isoch_eps;

        unsigned int device_state;
        unsigned int resume_state;

        int irq;

        void __iomem *base;
        resource_size_t phys_base;
        void __iomem *ipfs;
        void __iomem *fpci;

        struct regulator_bulk_data *supplies;

        struct clk_bulk_data *clks;

        bool device_mode;
        struct work_struct usb_role_sw_work;

        struct phy **usb3_phy;
        struct phy *curr_usb3_phy;
        struct phy **utmi_phy;
        struct phy *curr_utmi_phy;

        struct tegra_xudc_save_regs saved_regs;
        bool suspended;
        bool powergated;

        struct usb_phy **usbphy;
        struct usb_phy *curr_usbphy;
        struct notifier_block vbus_nb;

        struct completion disconnect_complete;

        bool selfpowered;

#define TOGGLE_VBUS_WAIT_MS 100
        struct delayed_work plc_reset_work;
        bool wait_csc;

        struct delayed_work port_reset_war_work;
        bool wait_for_sec_prc;
};

#define XUDC_TRB_MAX_BUFFER_SIZE 65536
#define XUDC_MAX_ISOCH_EPS 4
#define XUDC_INTERRUPT_MODERATION_US 0

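/*
 * Default descriptor for endpoint 0. Not const, since wMaxPacketSize
 * depends on the negotiated connection speed (for example, 512 bytes at
 * SuperSpeed) and is updated at runtime.
 */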
static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = 0,
        .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
        .wMaxPacketSize = cpu_to_le16(64),
};

struct tegra_xudc_soc {
        const char * const *supply_names;
        unsigned int num_supplies;
        const char * const *clock_names;
        unsigned int num_clks;
        unsigned int num_phys;
        bool u1_enable;
        bool u2_enable;
        bool lpm_enable;
        bool invalid_seq_num;
        bool pls_quirk;
        bool port_reset_quirk;
        bool port_speed_quirk;
        bool has_ipfs;
};

static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
{
        return readl(xudc->fpci + offset);
}

static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
                               unsigned int offset)
{
        writel(val, xudc->fpci + offset);
}

static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
{
        return readl(xudc->ipfs + offset);
}

static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
                               unsigned int offset)
{
        writel(val, xudc->ipfs + offset);
}

static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
{
        return readl(xudc->base + offset);
}

static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
                               unsigned int offset)
{
        writel(val, xudc->base + offset);
}

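/*
 * Poll a device register until (value & mask) == val, in 1 us steps with a
 * 100 us timeout. Uses the atomic poll variant, so it is safe to call
 * under the xudc spinlock.
 */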
static inline int xudc_readl_poll(struct tegra_xudc *xudc,
                                  unsigned int offset, u32 mask, u32 val)
{
        u32 regval;

        return readl_poll_timeout_atomic(xudc->base + offset, regval,
                                         (regval & mask) == val, 1, 100);
}

static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
{
        return container_of(gadget, struct tegra_xudc, gadget);
}

static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
{
        return container_of(ep, struct tegra_xudc_ep, usb_ep);
}

static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
{
        return container_of(req, struct tegra_xudc_request, usb_req);
}

static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
                            struct tegra_xudc_trb *trb)
{
        dev_dbg(xudc->dev,
                "%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
                type, trb, trb->data_lo, trb->data_hi, trb->status,
                trb->control);
}

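/*
 * On SoCs with the port_speed_quirk flag, the two helpers below clamp the
 * SuperSpeed link parameters to Gen 1 while operating in device mode and
 * restore the Gen 2 defaults when leaving it.
 */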
static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
{
        u32 val;

        /* limit port speed to gen 1 */
        val = xudc_readl(xudc, SSPX_CORE_CNT56);
        val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
        val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
        xudc_writel(xudc, val, SSPX_CORE_CNT56);

        val = xudc_readl(xudc, SSPX_CORE_CNT57);
        val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
        val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
        xudc_writel(xudc, val, SSPX_CORE_CNT57);

        val = xudc_readl(xudc, SSPX_CORE_CNT65);
        val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
        val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
        xudc_writel(xudc, val, SSPX_CORE_CNT65);

        val = xudc_readl(xudc, SSPX_CORE_CNT66);
        val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
        val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
        xudc_writel(xudc, val, SSPX_CORE_CNT66);

        val = xudc_readl(xudc, SSPX_CORE_CNT67);
        val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
        val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
        xudc_writel(xudc, val, SSPX_CORE_CNT67);

        val = xudc_readl(xudc, SSPX_CORE_CNT72);
        val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
        val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
        xudc_writel(xudc, val, SSPX_CORE_CNT72);
}

static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
{
        u32 val;

        /* restore port speed to gen2 */
        val = xudc_readl(xudc, SSPX_CORE_CNT56);
        val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
        val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
        xudc_writel(xudc, val, SSPX_CORE_CNT56);

        val = xudc_readl(xudc, SSPX_CORE_CNT57);
        val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
        val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
        xudc_writel(xudc, val, SSPX_CORE_CNT57);

        val = xudc_readl(xudc, SSPX_CORE_CNT65);
        val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
        val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
        xudc_writel(xudc, val, SSPX_CORE_CNT65);

        val = xudc_readl(xudc, SSPX_CORE_CNT66);
        val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
        val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
        xudc_writel(xudc, val, SSPX_CORE_CNT66);

        val = xudc_readl(xudc, SSPX_CORE_CNT67);
        val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
        val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
        xudc_writel(xudc, val, SSPX_CORE_CNT67);

        val = xudc_readl(xudc, SSPX_CORE_CNT72);
        val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
        val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
        xudc_writel(xudc, val, SSPX_CORE_CNT72);
}

static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
        int err;

        pm_runtime_get_sync(xudc->dev);

        err = phy_power_on(xudc->curr_utmi_phy);
        if (err < 0)
                dev_err(xudc->dev, "UTMI PHY power on failed: %d\n", err);

        err = phy_power_on(xudc->curr_usb3_phy);
        if (err < 0)
                dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err);

        dev_dbg(xudc->dev, "device mode on\n");

        phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
                         USB_ROLE_DEVICE);
}

static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
{
        bool connected = false;
        u32 pls, val;
        int err;

        dev_dbg(xudc->dev, "device mode off\n");

        connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);

        reinit_completion(&xudc->disconnect_complete);

        if (xudc->soc->port_speed_quirk)
                tegra_xudc_restore_port_speed(xudc);

        phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);

        pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
                PORTSC_PLS_SHIFT;

        /* Direct link to U0 if disconnected in RESUME or U2. */
        if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
            (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
                val = xudc_readl(xudc, PORTPM);
                val |= PORTPM_FRWE;
                xudc_writel(xudc, val, PORTPM);

                val = xudc_readl(xudc, PORTSC);
                val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
                val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
                xudc_writel(xudc, val, PORTSC);
        }

        /* Wait for disconnect event. */
        if (connected)
                wait_for_completion(&xudc->disconnect_complete);

        /* Make sure interrupt handler has completed before powergating. */
        synchronize_irq(xudc->irq);

        err = phy_power_off(xudc->curr_utmi_phy);
        if (err < 0)
                dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);

        err = phy_power_off(xudc->curr_usb3_phy);
        if (err < 0)
                dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err);

        pm_runtime_put(xudc->dev);
}

static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
{
        struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
                                               usb_role_sw_work);

        if (xudc->device_mode)
                tegra_xudc_device_mode_on(xudc);
        else
                tegra_xudc_device_mode_off(xudc);
}

static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
                                    struct usb_phy *usbphy)
{
        unsigned int i;

        for (i = 0; i < xudc->soc->num_phys; i++) {
                if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
                        return i;
        }

        dev_info(xudc->dev, "phy index could not be found for shared USB PHY\n");
        return -1;
}

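/*
 * VBUS notifier from the shared USB PHY: determine whether the role
 * actually changed, select the PHY pair that generated the event, and
 * defer the mode switch to usb_role_sw_work, since switching modes can
 * sleep (PHY power on/off, waiting for the disconnect event).
 */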
static int tegra_xudc_vbus_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
{
        struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
                                               vbus_nb);
        struct usb_phy *usbphy = (struct usb_phy *)data;
        int phy_index;

        dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);

        if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
            (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
                dev_dbg(xudc->dev, "Same role (%d) received, ignoring\n",
                        xudc->device_mode);
                return NOTIFY_OK;
        }

        xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS);

        phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
        dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
                phy_index);

        if (!xudc->suspended && phy_index != -1) {
                xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
                xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
                xudc->curr_usbphy = usbphy;
                schedule_work(&xudc->usb_role_sw_work);
        }

        return NOTIFY_OK;
}

static void tegra_xudc_plc_reset_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
                                               plc_reset_work);
        unsigned long flags;

        spin_lock_irqsave(&xudc->lock, flags);

        if (xudc->wait_csc) {
                u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
                        PORTSC_PLS_SHIFT;

                if (pls == PORTSC_PLS_INACTIVE) {
                        dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
                        phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
                                         USB_ROLE_NONE);
                        phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
                                         USB_ROLE_DEVICE);

                        xudc->wait_csc = false;
                }
        }

        spin_unlock_irqrestore(&xudc->lock, flags);
}

static void tegra_xudc_port_reset_war_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct tegra_xudc *xudc =
                container_of(dwork, struct tegra_xudc, port_reset_war_work);
        unsigned long flags;
        u32 pls;
        int ret;

        spin_lock_irqsave(&xudc->lock, flags);

        if (xudc->device_mode && xudc->wait_for_sec_prc) {
                pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
                        PORTSC_PLS_SHIFT;
                dev_dbg(xudc->dev, "pls = %x\n", pls);

                if (pls == PORTSC_PLS_DISABLED) {
                        /* PRC didn't complete in 100 ms; toggle VBUS */
                        dev_dbg(xudc->dev, "toggle vbus\n");
                        ret = tegra_phy_xusb_utmi_port_reset(xudc->curr_utmi_phy);
                        if (ret == 1)
                                xudc->wait_for_sec_prc = false;
                }
        }

        spin_unlock_irqrestore(&xudc->lock, flags);
}

static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
                                   struct tegra_xudc_trb *trb)
{
        unsigned int index;

        index = trb - ep->transfer_ring;

        if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
                return 0;

        return (ep->transfer_ring_phys + index * sizeof(*trb));
}

static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
                                               dma_addr_t addr)
{
        struct tegra_xudc_trb *trb;
        unsigned int index;

        index = (addr - ep->transfer_ring_phys) / sizeof(*trb);

        if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
                return NULL;

        trb = &ep->transfer_ring[index];

        return trb;
}

static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
{
        xudc_writel(xudc, BIT(ep), EP_RELOAD);
        xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
}

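/*
 * Endpoint pause/halt state changes follow a common handshake: update the
 * endpoint's bit in the control register, poll EP_STCHG until the hardware
 * acknowledges the state change, then write the bit back to EP_STCHG to
 * clear it.
 */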
static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
{
        u32 val;

        val = xudc_readl(xudc, EP_PAUSE);
        if (val & BIT(ep))
                return;
        val |= BIT(ep);

        xudc_writel(xudc, val, EP_PAUSE);

        xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

        xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
{
        u32 val;

        val = xudc_readl(xudc, EP_PAUSE);
        if (!(val & BIT(ep)))
                return;
        val &= ~BIT(ep);

        xudc_writel(xudc, val, EP_PAUSE);

        xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

        xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause_all(struct tegra_xudc *xudc)
{
        u32 val;

        val = xudc_readl(xudc, EP_PAUSE);

        xudc_writel(xudc, 0, EP_PAUSE);

        xudc_readl_poll(xudc, EP_STCHG, val, val);

        xudc_writel(xudc, val, EP_STCHG);
}

static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
{
        u32 val;

        val = xudc_readl(xudc, EP_HALT);
        if (val & BIT(ep))
                return;
        val |= BIT(ep);
        xudc_writel(xudc, val, EP_HALT);

        xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

        xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
{
        u32 val;

        val = xudc_readl(xudc, EP_HALT);
        if (!(val & BIT(ep)))
                return;
        val &= ~BIT(ep);
        xudc_writel(xudc, val, EP_HALT);

        xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

        xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt_all(struct tegra_xudc *xudc)
{
        u32 val;

        val = xudc_readl(xudc, EP_HALT);
        if (!val)
                return;
        xudc_writel(xudc, 0, EP_HALT);

        xudc_readl_poll(xudc, EP_STCHG, val, val);

        xudc_writel(xudc, val, EP_STCHG);
}

static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
{
        xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
        xudc_writel(xudc, BIT(ep), EP_STOPPED);
}

static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
{
        xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
}

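/*
 * Complete a request and give it back to the gadget driver. The xudc lock
 * is dropped around usb_gadget_giveback_request(), since the completion
 * callback may call back into the driver (for example, to queue another
 * request).
 */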
static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
                                struct tegra_xudc_request *req, int status)
{
        struct tegra_xudc *xudc = ep->xudc;

        dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
                req, ep->index, status);

        if (likely(req->usb_req.status == -EINPROGRESS))
                req->usb_req.status = status;

        list_del_init(&req->list);

        if (usb_endpoint_xfer_control(ep->desc)) {
                usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
                                         (xudc->setup_state ==
                                          DATA_STAGE_XFER));
        } else {
                usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
                                         usb_endpoint_dir_in(ep->desc));
        }

        spin_unlock(&xudc->lock);
        usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
        spin_lock(&xudc->lock);
}

static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
{
        struct tegra_xudc_request *req;

        while (!list_empty(&ep->queue)) {
                req = list_first_entry(&ep->queue, struct tegra_xudc_request,
                                       list);
                tegra_xudc_req_done(ep, req, status);
        }
}

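/*
 * Number of TRBs that can still be enqueued. One slot is always left free
 * to distinguish a full ring from an empty one, and the final ring entry
 * is reserved for the link TRB, hence the -1/-2 adjustments below.
 */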
static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
{
        if (ep->ring_full)
                return 0;

        if (ep->deq_ptr > ep->enq_ptr)
                return ep->deq_ptr - ep->enq_ptr - 1;

        return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
}

static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
                                     struct tegra_xudc_request *req,
                                     struct tegra_xudc_trb *trb,
                                     bool ioc)
{
        struct tegra_xudc *xudc = ep->xudc;
        dma_addr_t buf_addr;
        size_t len;

        len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
                    req->buf_queued);
        if (len > 0)
                buf_addr = req->usb_req.dma + req->buf_queued;
        else
                buf_addr = 0;

        trb_write_data_ptr(trb, buf_addr);

        trb_write_transfer_len(trb, len);
        trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

        if (req->trbs_queued == req->trbs_needed - 1 ||
                (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
                trb_write_chain(trb, 0);
        else
                trb_write_chain(trb, 1);

        trb_write_ioc(trb, ioc);

        if (usb_endpoint_dir_out(ep->desc) ||
            (usb_endpoint_xfer_control(ep->desc) &&
             (xudc->setup_state == DATA_STAGE_RECV)))
                trb_write_isp(trb, 1);
        else
                trb_write_isp(trb, 0);

        if (usb_endpoint_xfer_control(ep->desc)) {
                if (xudc->setup_state == DATA_STAGE_XFER ||
                    xudc->setup_state == DATA_STAGE_RECV)
                        trb_write_type(trb, TRB_TYPE_DATA_STAGE);
                else
                        trb_write_type(trb, TRB_TYPE_STATUS_STAGE);

                if (xudc->setup_state == DATA_STAGE_XFER ||
                    xudc->setup_state == STATUS_STAGE_XFER)
                        trb_write_data_stage_dir(trb, 1);
                else
                        trb_write_data_stage_dir(trb, 0);
        } else if (usb_endpoint_xfer_isoc(ep->desc)) {
                trb_write_type(trb, TRB_TYPE_ISOCH);
                trb_write_sia(trb, 1);
                trb_write_frame_id(trb, 0);
                trb_write_tlbpc(trb, 0);
        } else if (usb_ss_max_streams(ep->comp_desc)) {
                trb_write_type(trb, TRB_TYPE_STREAM);
                trb_write_stream_id(trb, req->usb_req.stream_id);
        } else {
                trb_write_type(trb, TRB_TYPE_NORMAL);
                trb_write_stream_id(trb, 0);
        }

        trb_write_cycle(trb, ep->pcs);

        req->trbs_queued++;
        req->buf_queued += len;

        dump_trb(xudc, "TRANSFER", trb);
}

static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
                                          struct tegra_xudc_request *req)
{
        unsigned int i, count, available;
        bool wait_td = false;

        available = ep_available_trbs(ep);
        count = req->trbs_needed - req->trbs_queued;
        if (available < count) {
                count = available;
                ep->ring_full = true;
        }

        /*
         * To generate a zero-length packet on the USB bus, SW needs to
         * schedule a standalone zero-length TD. Per the hardware's
         * behavior, SW needs to schedule TDs differently for different
         * endpoint types.
         *
         * For control endpoints:
         * - Data stage TD (IOC = 1, CH = 0)
         * - Ring doorbell and wait for the transfer event
         * - Data stage TD for ZLP (IOC = 1, CH = 0)
         * - Ring doorbell
         *
         * For bulk and interrupt endpoints:
         * - Normal transfer TD (IOC = 0, CH = 0)
         * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
         * - Ring doorbell
         */

        if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
                wait_td = true;

        if (!req->first_trb)
                req->first_trb = &ep->transfer_ring[ep->enq_ptr];

        for (i = 0; i < count; i++) {
                struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
                bool ioc = false;

                if ((i == count - 1) || (wait_td && i == count - 2))
                        ioc = true;

                tegra_xudc_queue_one_trb(ep, req, trb, ioc);
                req->last_trb = trb;

                ep->enq_ptr++;
                if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
                        trb = &ep->transfer_ring[ep->enq_ptr];
                        trb_write_cycle(trb, ep->pcs);
                        ep->pcs = !ep->pcs;
                        ep->enq_ptr = 0;
                }

                if (ioc)
                        break;
        }

        return count;
}

static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
{
        struct tegra_xudc *xudc = ep->xudc;
        u32 val;

        if (list_empty(&ep->queue))
                return;

        val = DB_TARGET(ep->index);
        if (usb_endpoint_xfer_control(ep->desc)) {
                val |= DB_STREAMID(xudc->setup_seq_num);
        } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
                struct tegra_xudc_request *req;

                /* Don't ring doorbell if the stream has been rejected. */
                if (ep->stream_rejected)
                        return;

                req = list_first_entry(&ep->queue, struct tegra_xudc_request,
                                       list);
                val |= DB_STREAMID(req->usb_req.stream_id);
        }

        dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
        xudc_writel(xudc, val, DB);
}

static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
{
        struct tegra_xudc_request *req;
        bool trbs_queued = false;

        list_for_each_entry(req, &ep->queue, list) {
                if (ep->ring_full)
                        break;

                if (tegra_xudc_queue_trbs(ep, req) > 0)
                        trbs_queued = true;
        }

        if (trbs_queued)
                tegra_xudc_ep_ring_doorbell(ep);
}

static int
__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
{
        struct tegra_xudc *xudc = ep->xudc;
        int err;

        if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
                dev_err(xudc->dev, "control EP has pending transfers\n");
                return -EINVAL;
        }

        if (usb_endpoint_xfer_control(ep->desc)) {
                err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
                                             (xudc->setup_state ==
                                              DATA_STAGE_XFER));
        } else {
                err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
                                             usb_endpoint_dir_in(ep->desc));
        }

        if (err < 0) {
                dev_err(xudc->dev, "failed to map request: %d\n", err);
                return err;
        }

        req->first_trb = NULL;
        req->last_trb = NULL;
        req->buf_queued = 0;
        req->trbs_queued = 0;
        req->need_zlp = false;
        req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
                                        XUDC_TRB_MAX_BUFFER_SIZE);
        if (req->usb_req.length == 0)
                req->trbs_needed++;

        if (!usb_endpoint_xfer_isoc(ep->desc) &&
            req->usb_req.zero && req->usb_req.length &&
            ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
                req->trbs_needed++;
                req->need_zlp = true;
        }

        req->usb_req.status = -EINPROGRESS;
        req->usb_req.actual = 0;

        list_add_tail(&req->list, &ep->queue);

        tegra_xudc_ep_kick_queue(ep);

        return 0;
}

static int
tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
                    gfp_t gfp)
{
        struct tegra_xudc_request *req;
        struct tegra_xudc_ep *ep;
        struct tegra_xudc *xudc;
        unsigned long flags;
        int ret;

        if (!usb_ep || !usb_req)
                return -EINVAL;

        ep = to_xudc_ep(usb_ep);
        req = to_xudc_req(usb_req);
        xudc = ep->xudc;

        spin_lock_irqsave(&xudc->lock, flags);
        if (xudc->powergated || !ep->desc) {
                ret = -ESHUTDOWN;
                goto unlock;
        }

        ret = __tegra_xudc_ep_queue(ep, req);
unlock:
        spin_unlock_irqrestore(&xudc->lock, flags);

        return ret;
}

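/*
 * Remove a cancelled request's TRBs from the transfer ring: zero them,
 * flip their cycle bits back to the un-enqueued state, and rewind the
 * enqueue pointer so the requests queued behind the cancelled one can be
 * resubmitted from scratch.
 */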
static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
                                  struct tegra_xudc_request *req)
{
        struct tegra_xudc_trb *trb = req->first_trb;
        bool pcs_enq = trb_read_cycle(trb);
        bool pcs;

        /*
         * Clear out all the TRBs that are part of or follow the cancelled
         * request, and restore each TRB's cycle bit to its un-enqueued
         * state.
         */
        while (trb != &ep->transfer_ring[ep->enq_ptr]) {
                pcs = trb_read_cycle(trb);
                memset(trb, 0, sizeof(*trb));
                trb_write_cycle(trb, !pcs);
                trb++;

                if (trb_read_type(trb) == TRB_TYPE_LINK)
                        trb = ep->transfer_ring;
        }

        /* Requests will be re-queued at the start of the cancelled request. */
        ep->enq_ptr = req->first_trb - ep->transfer_ring;
        /*
         * Retrieve the correct cycle bit state from the first trb of
         * the cancelled request.
         */
        ep->pcs = pcs_enq;
        ep->ring_full = false;
        list_for_each_entry_continue(req, &ep->queue, list) {
                req->usb_req.status = -EINPROGRESS;
                req->usb_req.actual = 0;

                req->first_trb = NULL;
                req->last_trb = NULL;
                req->buf_queued = 0;
                req->trbs_queued = 0;
        }
}

/*
 * Determine if the given TRB is in the range [first trb, last trb] for the
 * given request.
 */
static bool trb_in_request(struct tegra_xudc_ep *ep,
                           struct tegra_xudc_request *req,
                           struct tegra_xudc_trb *trb)
{
        dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
                req->first_trb, req->last_trb, trb);

        if (trb >= req->first_trb && (trb <= req->last_trb ||
                                      req->last_trb < req->first_trb))
                return true;

        if (trb < req->first_trb && trb <= req->last_trb &&
            req->last_trb < req->first_trb)
                return true;

        return false;
}

/*
 * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
 * for the given endpoint and request.
 */
static bool trb_before_request(struct tegra_xudc_ep *ep,
                               struct tegra_xudc_request *req,
                               struct tegra_xudc_trb *trb)
{
        struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

        dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
                __func__, req->first_trb, req->last_trb, enq_trb, trb);

        if (trb < req->first_trb && (enq_trb <= trb ||
                                     req->first_trb < enq_trb))
                return true;

        if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
                return true;

        return false;
}

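/*
 * Cancel a request that is queued on an endpoint. Three cases are handled
 * below: the hardware is currently processing (or has yet to reach) one of
 * the request's TRBs, the dequeue pointer lies before the request, or the
 * request has already completed but its completion event is still pending.
 */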
static int
__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
                        struct tegra_xudc_request *req)
{
        struct tegra_xudc *xudc = ep->xudc;
        struct tegra_xudc_request *r;
        struct tegra_xudc_trb *deq_trb;
        bool busy, kick_queue = false;
        int ret = 0;

        /* Make sure the request is actually queued to this endpoint. */
        list_for_each_entry(r, &ep->queue, list) {
                if (r == req)
                        break;
        }

        if (r != req)
                return -EINVAL;

        /* Request hasn't been queued in the transfer ring yet. */
        if (!req->trbs_queued) {
                tegra_xudc_req_done(ep, req, -ECONNRESET);
                return 0;
        }

        /* Halt DMA for this endpoint. */
        if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
                ep_pause(xudc, ep->index);
                ep_wait_for_inactive(xudc, ep->index);
        }

        deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
        /* Is the hardware processing the TRB at the dequeue pointer? */
        busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));

        if (trb_in_request(ep, req, deq_trb) && busy) {
                /*
                 * Request has been partially completed or it hasn't
                 * started processing yet.
                 */
                dma_addr_t deq_ptr;

                squeeze_transfer_ring(ep, req);

                req->usb_req.actual = ep_ctx_read_edtla(ep->context);
                tegra_xudc_req_done(ep, req, -ECONNRESET);
                kick_queue = true;

                /* EDTLA is > 0: request has been partially completed */
                if (req->usb_req.actual > 0) {
                        /*
                         * Abort the pending transfer and update the dequeue
                         * pointer
                         */
                        ep_ctx_write_edtla(ep->context, 0);
                        ep_ctx_write_partial_td(ep->context, 0);
                        ep_ctx_write_data_offset(ep->context, 0);

                        deq_ptr = trb_virt_to_phys(ep,
                                        &ep->transfer_ring[ep->enq_ptr]);

                        if (dma_mapping_error(xudc->dev, deq_ptr)) {
                                ret = -EINVAL;
                        } else {
                                ep_ctx_write_deq_ptr(ep->context, deq_ptr);
                                ep_ctx_write_dcs(ep->context, ep->pcs);
                                ep_reload(xudc, ep->index);
                        }
                }
        } else if (trb_before_request(ep, req, deq_trb) && busy) {
                /* Request hasn't started processing yet. */
                squeeze_transfer_ring(ep, req);

                tegra_xudc_req_done(ep, req, -ECONNRESET);
                kick_queue = true;
        } else {
                /*
                 * Request has completed, but we haven't processed the
                 * completion event yet.
                 */
                tegra_xudc_req_done(ep, req, -ECONNRESET);
                ret = -EINVAL;
        }

        /* Resume the endpoint. */
        ep_unpause(xudc, ep->index);

        if (kick_queue)
                tegra_xudc_ep_kick_queue(ep);

        return ret;
}

static int
tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
{
        struct tegra_xudc_request *req;
        struct tegra_xudc_ep *ep;
        struct tegra_xudc *xudc;
        unsigned long flags;
        int ret;

        if (!usb_ep || !usb_req)
                return -EINVAL;

        ep = to_xudc_ep(usb_ep);
        req = to_xudc_req(usb_req);
        xudc = ep->xudc;

        spin_lock_irqsave(&xudc->lock, flags);

        if (xudc->powergated || !ep->desc) {
                ret = -ESHUTDOWN;
                goto unlock;
        }

        ret = __tegra_xudc_ep_dequeue(ep, req);
unlock:
        spin_unlock_irqrestore(&xudc->lock, flags);

        return ret;
}

1534static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
1535{
1536        struct tegra_xudc *xudc = ep->xudc;
1537
1538        if (!ep->desc)
1539                return -EINVAL;
1540
1541        if (usb_endpoint_xfer_isoc(ep->desc)) {
1542                dev_err(xudc->dev, "can't halt isochronous EP\n");
1543                return -ENOTSUPP;
1544        }
1545
1546        if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
1547                dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
1548                        halt ? "halted" : "not halted");
1549                return 0;
1550        }
1551
1552        if (halt) {
1553                ep_halt(xudc, ep->index);
1554        } else {
1555                ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
1556
1557                ep_reload(xudc, ep->index);
1558
1559                ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
1560                ep_ctx_write_seq_num(ep->context, 0);
1561
1562                ep_reload(xudc, ep->index);
1563                ep_unpause(xudc, ep->index);
1564                ep_unhalt(xudc, ep->index);
1565
1566                tegra_xudc_ep_ring_doorbell(ep);
1567        }
1568
1569        return 0;
1570}
1571
1572static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
1573{
1574        struct tegra_xudc_ep *ep;
1575        struct tegra_xudc *xudc;
1576        unsigned long flags;
1577        int ret;
1578
1579        if (!usb_ep)
1580                return -EINVAL;
1581
1582        ep = to_xudc_ep(usb_ep);
1583        xudc = ep->xudc;
1584
1585        spin_lock_irqsave(&xudc->lock, flags);
1586        if (xudc->powergated) {
1587                ret = -ESHUTDOWN;
1588                goto unlock;
1589        }
1590
1591        if (value && usb_endpoint_dir_in(ep->desc) &&
1592            !list_empty(&ep->queue)) {
1593                dev_err(xudc->dev, "can't halt EP with requests pending\n");
1594                ret = -EAGAIN;
1595                goto unlock;
1596        }
1597
1598        ret = __tegra_xudc_ep_set_halt(ep, value);
1599unlock:
1600        spin_unlock_irqrestore(&xudc->lock, flags);
1601
1602        return ret;
1603}
1604
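/*
 * Program the endpoint context from the endpoint descriptor and, for
 * SuperSpeed, the companion descriptor: transfer type, max packet size,
 * burst size, max ESIT payload for periodic endpoints, and the initial
 * dequeue pointer and cycle state of the transfer ring.  Called with
 * xudc->lock held.
 */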
1605static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
1606{
1607        const struct usb_endpoint_descriptor *desc = ep->desc;
1608        const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
1609        struct tegra_xudc *xudc = ep->xudc;
1610        u16 maxpacket, maxburst = 0, esit = 0;
1611        u32 val;
1612
1613        maxpacket = usb_endpoint_maxp(desc);
1614        if (xudc->gadget.speed == USB_SPEED_SUPER) {
1615                if (!usb_endpoint_xfer_control(desc))
1616                        maxburst = comp_desc->bMaxBurst;
1617
1618                if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
1619                        esit = le16_to_cpu(comp_desc->wBytesPerInterval);
1620        } else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
1621                   (usb_endpoint_xfer_int(desc) ||
1622                    usb_endpoint_xfer_isoc(desc))) {
1623                if (xudc->gadget.speed == USB_SPEED_HIGH) {
1624                        maxburst = usb_endpoint_maxp_mult(desc) - 1;
1625                        if (maxburst == 0x3) {
1626                                dev_warn(xudc->dev,
1627                                         "invalid endpoint maxburst\n");
1628                                maxburst = 0x2;
1629                        }
1630                }
1631                esit = maxpacket * (maxburst + 1);
1632        }
1633
1634        memset(ep->context, 0, sizeof(*ep->context));
1635
1636        ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
1637        ep_ctx_write_interval(ep->context, desc->bInterval);
1638        if (xudc->gadget.speed == USB_SPEED_SUPER) {
1639                if (usb_endpoint_xfer_isoc(desc)) {
1640                        ep_ctx_write_mult(ep->context,
1641                                          comp_desc->bmAttributes & 0x3);
1642                }
1643
1644                if (usb_endpoint_xfer_bulk(desc)) {
1645                        ep_ctx_write_max_pstreams(ep->context,
1646                                                  comp_desc->bmAttributes &
1647                                                  0x1f);
1648                        ep_ctx_write_lsa(ep->context, 1);
1649                }
1650        }
1651
1652        if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
1653                val = usb_endpoint_type(desc);
1654        else
1655                val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;
1656
1657        ep_ctx_write_type(ep->context, val);
1658        ep_ctx_write_cerr(ep->context, 0x3);
1659        ep_ctx_write_max_packet_size(ep->context, maxpacket);
1660        ep_ctx_write_max_burst_size(ep->context, maxburst);
1661
1662        ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
1663        ep_ctx_write_dcs(ep->context, ep->pcs);
1664
1665        /* Select a reasonable average TRB length based on endpoint type. */
1666        switch (usb_endpoint_type(desc)) {
1667        case USB_ENDPOINT_XFER_CONTROL:
1668                val = 8;
1669                break;
1670        case USB_ENDPOINT_XFER_INT:
1671                val = 1024;
1672                break;
1673        case USB_ENDPOINT_XFER_BULK:
1674        case USB_ENDPOINT_XFER_ISOC:
1675        default:
1676                val = 3072;
1677                break;
1678        }
1679
1680        ep_ctx_write_avg_trb_len(ep->context, val);
1681        ep_ctx_write_max_esit_payload(ep->context, esit);
1682
1683        ep_ctx_write_cerrcnt(ep->context, 0x3);
1684}
1685
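/*
 * The last slot of a transfer ring holds a link TRB pointing back at the
 * base of the ring.  Its toggle-cycle bit flips the producer cycle state
 * on every wrap-around, which is how TRB ownership is tracked between
 * the driver and the controller.
 */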
1686static void setup_link_trb(struct tegra_xudc_ep *ep,
1687                           struct tegra_xudc_trb *trb)
1688{
1689        trb_write_data_ptr(trb, ep->transfer_ring_phys);
1690        trb_write_type(trb, TRB_TYPE_LINK);
1691        trb_write_toggle_cycle(trb, 1);
1692}
1693
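/*
 * Disable an endpoint and complete all of its outstanding requests with
 * -ESHUTDOWN.  Must be called with xudc->lock held.  When the last
 * non-control endpoint of a configuration is torn down, the device is
 * dropped back from the configured state to the address state.
 */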
1694static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
1695{
1696        struct tegra_xudc *xudc = ep->xudc;
1697
1698        if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
1699                dev_err(xudc->dev, "endpoint %u already disabled\n",
1700                        ep->index);
1701                return -EINVAL;
1702        }
1703
1704        ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
1705
1706        ep_reload(xudc, ep->index);
1707
1708        tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
1709
1710        xudc->nr_enabled_eps--;
1711        if (usb_endpoint_xfer_isoc(ep->desc))
1712                xudc->nr_isoch_eps--;
1713
1714        ep->desc = NULL;
1715        ep->comp_desc = NULL;
1716
1717        memset(ep->context, 0, sizeof(*ep->context));
1718
1719        ep_unpause(xudc, ep->index);
1720        ep_unhalt(xudc, ep->index);
1721        if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
1722                xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
1723
1724        /*
1725         * If this is the last endpoint disabled in a de-configure request,
1726         * switch back to address state.
1727         */
1728        if ((xudc->device_state == USB_STATE_CONFIGURED) &&
1729            (xudc->nr_enabled_eps == 1)) {
1730                u32 val;
1731
1732                xudc->device_state = USB_STATE_ADDRESS;
1733                usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1734
1735                val = xudc_readl(xudc, CTRL);
1736                val &= ~CTRL_RUN;
1737                xudc_writel(xudc, val, CTRL);
1738        }
1739
1740        dev_info(xudc->dev, "ep %u disabled\n", ep->index);
1741
1742        return 0;
1743}
1744
1745static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
1746{
1747        struct tegra_xudc_ep *ep;
1748        struct tegra_xudc *xudc;
1749        unsigned long flags;
1750        int ret;
1751
1752        if (!usb_ep)
1753                return -EINVAL;
1754
1755        ep = to_xudc_ep(usb_ep);
1756        xudc = ep->xudc;
1757
1758        spin_lock_irqsave(&xudc->lock, flags);
1759        if (xudc->powergated) {
1760                ret = -ESHUTDOWN;
1761                goto unlock;
1762        }
1763
1764        ret = __tegra_xudc_ep_disable(ep);
1765unlock:
1766        spin_unlock_irqrestore(&xudc->lock, flags);
1767
1768        return ret;
1769}
1770
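/*
 * Enable an endpoint: reinitialize its transfer ring, program its
 * endpoint context and, for non-control endpoints, reload and un-pause
 * it in hardware.  Must be called with xudc->lock held.
 */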
1771static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
1772                                  const struct usb_endpoint_descriptor *desc)
1773{
1774        struct tegra_xudc *xudc = ep->xudc;
1775        unsigned int i;
1776        u32 val;
1777
1778        if (xudc->gadget.speed == USB_SPEED_SUPER &&
1779                !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
1780                return -EINVAL;
1781
1782        /* Disable the EP if it is currently enabled */
1783        if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
1784                __tegra_xudc_ep_disable(ep);
1785
1786        ep->desc = desc;
1787        ep->comp_desc = ep->usb_ep.comp_desc;
1788
1789        if (usb_endpoint_xfer_isoc(desc)) {
1790                if (xudc->nr_isoch_eps >= XUDC_MAX_ISOCH_EPS) {
1791                        dev_err(xudc->dev, "too many isochronous endpoints\n");
1792                        return -EBUSY;
1793                }
1794                xudc->nr_isoch_eps++;
1795        }
1796
1797        memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
1798               sizeof(*ep->transfer_ring));
1799        setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
1800
1801        ep->enq_ptr = 0;
1802        ep->deq_ptr = 0;
1803        ep->pcs = true;
1804        ep->ring_full = false;
1805        xudc->nr_enabled_eps++;
1806
1807        tegra_xudc_ep_context_setup(ep);
1808
1809        /*
1810         * No need to reload and un-halt EP0.  This will be done automatically
1811         * once a valid SETUP packet is received.
1812         */
1813        if (usb_endpoint_xfer_control(desc))
1814                goto out;
1815
1816        /*
1817         * Transition to configured state once the first non-control
1818         * endpoint is enabled.
1819         */
1820        if (xudc->device_state == USB_STATE_ADDRESS) {
1821                val = xudc_readl(xudc, CTRL);
1822                val |= CTRL_RUN;
1823                xudc_writel(xudc, val, CTRL);
1824
1825                xudc->device_state = USB_STATE_CONFIGURED;
1826                usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1827        }
1828
1829        if (usb_endpoint_xfer_isoc(desc)) {
1830                /*
1831                 * Pause all bulk endpoints when enabling an isoch endpoint
1832                 * to ensure the isoch endpoint is allocated enough bandwidth.
1833                 */
1834                for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
1835                        if (xudc->ep[i].desc &&
1836                            usb_endpoint_xfer_bulk(xudc->ep[i].desc))
1837                                ep_pause(xudc, i);
1838                }
1839        }
1840
1841        ep_reload(xudc, ep->index);
1842        ep_unpause(xudc, ep->index);
1843        ep_unhalt(xudc, ep->index);
1844
1845        if (usb_endpoint_xfer_isoc(desc)) {
1846                for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
1847                        if (xudc->ep[i].desc &&
1848                            usb_endpoint_xfer_bulk(xudc->ep[i].desc))
1849                                ep_unpause(xudc, i);
1850                }
1851        }
1852
1853out:
1854        dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
1855                 usb_ep_type_string(usb_endpoint_type(ep->desc)),
1856                 usb_endpoint_dir_in(ep->desc) ? "in" : "out");
1857
1858        return 0;
1859}
1860
1861static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
1862                                const struct usb_endpoint_descriptor *desc)
1863{
1864        struct tegra_xudc_ep *ep;
1865        struct tegra_xudc *xudc;
1866        unsigned long flags;
1867        int ret;
1868
1869        if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
1870                return -EINVAL;
1871
1872        ep = to_xudc_ep(usb_ep);
1873        xudc = ep->xudc;
1874
1875        spin_lock_irqsave(&xudc->lock, flags);
1876        if (xudc->powergated) {
1877                ret = -ESHUTDOWN;
1878                goto unlock;
1879        }
1880
1881        ret = __tegra_xudc_ep_enable(ep, desc);
1882unlock:
1883        spin_unlock_irqrestore(&xudc->lock, flags);
1884
1885        return ret;
1886}
1887
1888static struct usb_request *
1889tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
1890{
1891        struct tegra_xudc_request *req;
1892
1893        req = kzalloc(sizeof(*req), gfp);
1894        if (!req)
1895                return NULL;
1896
1897        INIT_LIST_HEAD(&req->list);
1898
1899        return &req->usb_req;
1900}
1901
1902static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
1903                                       struct usb_request *usb_req)
1904{
1905        struct tegra_xudc_request *req = to_xudc_req(usb_req);
1906
1907        kfree(req);
1908}
1909
1910static const struct usb_ep_ops tegra_xudc_ep_ops = {
1911        .enable = tegra_xudc_ep_enable,
1912        .disable = tegra_xudc_ep_disable,
1913        .alloc_request = tegra_xudc_ep_alloc_request,
1914        .free_request = tegra_xudc_ep_free_request,
1915        .queue = tegra_xudc_ep_queue,
1916        .dequeue = tegra_xudc_ep_dequeue,
1917        .set_halt = tegra_xudc_ep_set_halt,
1918};
1919
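/*
 * Illustrative sketch (not part of this driver): function drivers reach
 * the ops above through the generic gadget API rather than calling them
 * directly; "my_complete" below is a hypothetical completion callback:
 *
 *	struct usb_request *req;
 *
 *	usb_ep_enable(ep);				// -> tegra_xudc_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);	// -> tegra_xudc_ep_alloc_request()
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);		// -> tegra_xudc_ep_queue()
 *
 * EP0 is special: it is enabled internally (see tegra_xudc_gadget_start()),
 * so its ops reject external enable/disable calls with -EBUSY.
 */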
1920static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
1921                                 const struct usb_endpoint_descriptor *desc)
1922{
1923        return -EBUSY;
1924}
1925
1926static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
1927{
1928        return -EBUSY;
1929}
1930
1931static const struct usb_ep_ops tegra_xudc_ep0_ops = {
1932        .enable = tegra_xudc_ep0_enable,
1933        .disable = tegra_xudc_ep0_disable,
1934        .alloc_request = tegra_xudc_ep_alloc_request,
1935        .free_request = tegra_xudc_ep_free_request,
1936        .queue = tegra_xudc_ep_queue,
1937        .dequeue = tegra_xudc_ep_dequeue,
1938        .set_halt = tegra_xudc_ep_set_halt,
1939};
1940
1941static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
1942{
1943        struct tegra_xudc *xudc = to_xudc(gadget);
1944        unsigned long flags;
1945        int ret;
1946
1947        spin_lock_irqsave(&xudc->lock, flags);
1948        if (xudc->powergated) {
1949                ret = -ESHUTDOWN;
1950                goto unlock;
1951        }
1952
1953        ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
1954                MFINDEX_FRAME_SHIFT;
1955unlock:
1956        spin_unlock_irqrestore(&xudc->lock, flags);
1957
1958        return ret;
1959}
1960
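/*
 * Bring the link back to U0 after a suspend or remote wakeup: un-pause
 * all endpoints, request the U0 transition through PORTSC.PLS with the
 * link-write-strobe bit set, restore the pre-suspend device state and
 * re-ring every doorbell.
 */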
1961static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
1962{
1963        unsigned int i;
1964        u32 val;
1965
1966        ep_unpause_all(xudc);
1967
1968        /* Direct link to U0. */
1969        val = xudc_readl(xudc, PORTSC);
1970        if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
1971                val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
1972                val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
1973                xudc_writel(xudc, val, PORTSC);
1974        }
1975
1976        if (xudc->device_state == USB_STATE_SUSPENDED) {
1977                xudc->device_state = xudc->resume_state;
1978                usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1979                xudc->resume_state = 0;
1980        }
1981
1982        /*
1983         * Doorbells may be dropped if they are sent too soon (< ~200ns)
1984         * after unpausing the endpoint.  Wait for 500ns just to be safe.
1985         */
1986        ndelay(500);
1987        for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
1988                tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
1989}
1990
1991static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
1992{
1993        struct tegra_xudc *xudc = to_xudc(gadget);
1994        unsigned long flags;
1995        int ret = 0;
1996        u32 val;
1997
1998        spin_lock_irqsave(&xudc->lock, flags);
1999
2000        if (xudc->powergated) {
2001                ret = -ESHUTDOWN;
2002                goto unlock;
2003        }
2004        val = xudc_readl(xudc, PORTPM);
2005        dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
2006                        val, gadget->speed);
2007
2008        if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
2009             (val & PORTPM_RWE)) ||
2010            ((xudc->gadget.speed == USB_SPEED_SUPER) &&
2011             (val & PORTPM_FRWE))) {
2012                tegra_xudc_resume_device_state(xudc);
2013
2014                /* Send Device Notification packet. */
2015                if (xudc->gadget.speed == USB_SPEED_SUPER) {
2016                        val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
2017                                             | DEVNOTIF_LO_TRIG;
2018                        xudc_writel(xudc, 0, DEVNOTIF_HI);
2019                        xudc_writel(xudc, val, DEVNOTIF_LO);
2020                }
2021        }
2022
2023unlock:
2024        dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
2025        spin_unlock_irqrestore(&xudc->lock, flags);
2026
2027        return ret;
2028}
2029
2030static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
2031{
2032        struct tegra_xudc *xudc = to_xudc(gadget);
2033        unsigned long flags;
2034        u32 val;
2035
2036        pm_runtime_get_sync(xudc->dev);
2037
2038        spin_lock_irqsave(&xudc->lock, flags);
2039
2040        if (is_on != xudc->pullup) {
2041                val = xudc_readl(xudc, CTRL);
2042                if (is_on)
2043                        val |= CTRL_ENABLE;
2044                else
2045                        val &= ~CTRL_ENABLE;
2046                xudc_writel(xudc, val, CTRL);
2047        }
2048
2049        xudc->pullup = is_on;
2050        dev_dbg(xudc->dev, "%s: pullup:%d\n", __func__, is_on);
2051
2052        spin_unlock_irqrestore(&xudc->lock, flags);
2053
2054        pm_runtime_put(xudc->dev);
2055
2056        return 0;
2057}
2058
2059static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
2060                                   struct usb_gadget_driver *driver)
2061{
2062        struct tegra_xudc *xudc = to_xudc(gadget);
2063        unsigned long flags;
2064        u32 val;
2065        int ret;
2066        unsigned int i;
2067
2068        if (!driver)
2069                return -EINVAL;
2070
2071        pm_runtime_get_sync(xudc->dev);
2072
2073        spin_lock_irqsave(&xudc->lock, flags);
2074
2075        if (xudc->driver) {
2076                ret = -EBUSY;
2077                goto unlock;
2078        }
2079
2080        xudc->setup_state = WAIT_FOR_SETUP;
2081        xudc->device_state = USB_STATE_DEFAULT;
2082        usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2083
2084        ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
2085        if (ret < 0)
2086                goto unlock;
2087
2088        val = xudc_readl(xudc, CTRL);
2089        val |= CTRL_IE | CTRL_LSE;
2090        xudc_writel(xudc, val, CTRL);
2091
2092        val = xudc_readl(xudc, PORTHALT);
2093        val |= PORTHALT_STCHG_INTR_EN;
2094        xudc_writel(xudc, val, PORTHALT);
2095
2096        if (xudc->pullup) {
2097                val = xudc_readl(xudc, CTRL);
2098                val |= CTRL_ENABLE;
2099                xudc_writel(xudc, val, CTRL);
2100        }
2101
2102        for (i = 0; i < xudc->soc->num_phys; i++)
2103                if (xudc->usbphy[i])
2104                        otg_set_peripheral(xudc->usbphy[i]->otg, gadget);
2105
2106        xudc->driver = driver;
2107unlock:
2108        dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
2109        spin_unlock_irqrestore(&xudc->lock, flags);
2110
2111        pm_runtime_put(xudc->dev);
2112
2113        return ret;
2114}
2115
2116static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
2117{
2118        struct tegra_xudc *xudc = to_xudc(gadget);
2119        unsigned long flags;
2120        u32 val;
2121        unsigned int i;
2122
2123        pm_runtime_get_sync(xudc->dev);
2124
2125        spin_lock_irqsave(&xudc->lock, flags);
2126
2127        for (i = 0; i < xudc->soc->num_phys; i++)
2128                if (xudc->usbphy[i])
2129                        otg_set_peripheral(xudc->usbphy[i]->otg, NULL);
2130
2131        val = xudc_readl(xudc, CTRL);
2132        val &= ~(CTRL_IE | CTRL_ENABLE);
2133        xudc_writel(xudc, val, CTRL);
2134
2135        __tegra_xudc_ep_disable(&xudc->ep[0]);
2136
2137        xudc->driver = NULL;
2138        dev_dbg(xudc->dev, "Gadget stopped\n");
2139
2140        spin_unlock_irqrestore(&xudc->lock, flags);
2141
2142        pm_runtime_put(xudc->dev);
2143
2144        return 0;
2145}
2146
2147static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
2148                                                unsigned int m_a)
2149{
2150        int ret = 0;
2151        struct tegra_xudc *xudc = to_xudc(gadget);
2152
2153        dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);
2154
2155        if (xudc->curr_usbphy->chg_type == SDP_TYPE)
2156                ret = usb_phy_set_power(xudc->curr_usbphy, m_a);
2157
2158        return ret;
2159}
2160
2161static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
2162{
2163        struct tegra_xudc *xudc = to_xudc(gadget);
2164
2165        dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
2166        xudc->selfpowered = !!is_on;
2167
2168        return 0;
2169}
2170
2171static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
2172        .get_frame = tegra_xudc_gadget_get_frame,
2173        .wakeup = tegra_xudc_gadget_wakeup,
2174        .pullup = tegra_xudc_gadget_pullup,
2175        .udc_start = tegra_xudc_gadget_start,
2176        .udc_stop = tegra_xudc_gadget_stop,
2177        .vbus_draw = tegra_xudc_gadget_vbus_draw,
2178        .set_selfpowered = tegra_xudc_set_selfpowered,
2179};
2180
2181static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
2182{
2183}
2184
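/*
 * The two helpers below reuse the driver's preallocated ep0_req to queue
 * the status (zero-length) and data stages of a control transfer on
 * endpoint 0.
 */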
2185static int
2186tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
2187                void (*cmpl)(struct usb_ep *, struct usb_request *))
2188{
2189        xudc->ep0_req->usb_req.buf = NULL;
2190        xudc->ep0_req->usb_req.dma = 0;
2191        xudc->ep0_req->usb_req.length = 0;
2192        xudc->ep0_req->usb_req.complete = cmpl;
2193        xudc->ep0_req->usb_req.context = xudc;
2194
2195        return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
2196}
2197
2198static int
2199tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
2200                void (*cmpl)(struct usb_ep *, struct usb_request *))
2201{
2202        xudc->ep0_req->usb_req.buf = buf;
2203        xudc->ep0_req->usb_req.length = len;
2204        xudc->ep0_req->usb_req.complete = cmpl;
2205        xudc->ep0_req->usb_req.context = xudc;
2206
2207        return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
2208}
2209
2210static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
2211{
2212        switch (xudc->setup_state) {
2213        case DATA_STAGE_XFER:
2214                xudc->setup_state = STATUS_STAGE_RECV;
2215                tegra_xudc_ep0_queue_status(xudc, no_op_complete);
2216                break;
2217        case DATA_STAGE_RECV:
2218                xudc->setup_state = STATUS_STAGE_XFER;
2219                tegra_xudc_ep0_queue_status(xudc, no_op_complete);
2220                break;
2221        default:
2222                xudc->setup_state = WAIT_FOR_SETUP;
2223                break;
2224        }
2225}
2226
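/*
 * Hand a setup request to the function driver.  xudc->lock is dropped
 * around the ->setup() callback because the callback may re-enter this
 * driver, e.g. to queue a response on EP0.
 */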
2227static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
2228                                       struct usb_ctrlrequest *ctrl)
2229{
2230        int ret;
2231
2232        spin_unlock(&xudc->lock);
2233        ret = xudc->driver->setup(&xudc->gadget, ctrl);
2234        spin_lock(&xudc->lock);
2235
2236        return ret;
2237}
2238
2239static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
2240{
2241        struct tegra_xudc *xudc = req->context;
2242
2243        if (xudc->test_mode_pattern) {
2244                xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
2245                xudc->test_mode_pattern = 0;
2246        }
2247}
2248
2249static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
2250                                      struct usb_ctrlrequest *ctrl)
2251{
2252        bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
2253        u32 feature = le16_to_cpu(ctrl->wValue);
2254        u32 index = le16_to_cpu(ctrl->wIndex);
2255        u32 val, ep;
2256        int ret;
2257
2258        if (le16_to_cpu(ctrl->wLength) != 0)
2259                return -EINVAL;
2260
2261        switch (ctrl->bRequestType & USB_RECIP_MASK) {
2262        case USB_RECIP_DEVICE:
2263                switch (feature) {
2264                case USB_DEVICE_REMOTE_WAKEUP:
2265                        if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
2266                            (xudc->device_state == USB_STATE_DEFAULT))
2267                                return -EINVAL;
2268
2269                        val = xudc_readl(xudc, PORTPM);
2270                        if (set)
2271                                val |= PORTPM_RWE;
2272                        else
2273                                val &= ~PORTPM_RWE;
2274
2275                        xudc_writel(xudc, val, PORTPM);
2276                        break;
2277                case USB_DEVICE_U1_ENABLE:
2278                case USB_DEVICE_U2_ENABLE:
2279                        if ((xudc->device_state != USB_STATE_CONFIGURED) ||
2280                            (xudc->gadget.speed != USB_SPEED_SUPER))
2281                                return -EINVAL;
2282
2283                        val = xudc_readl(xudc, PORTPM);
2284                        if ((feature == USB_DEVICE_U1_ENABLE) &&
2285                             xudc->soc->u1_enable) {
2286                                if (set)
2287                                        val |= PORTPM_U1E;
2288                                else
2289                                        val &= ~PORTPM_U1E;
2290                        }
2291
2292                        if ((feature == USB_DEVICE_U2_ENABLE) &&
2293                             xudc->soc->u2_enable) {
2294                                if (set)
2295                                        val |= PORTPM_U2E;
2296                                else
2297                                        val &= ~PORTPM_U2E;
2298                        }
2299
2300                        xudc_writel(xudc, val, PORTPM);
2301                        break;
2302                case USB_DEVICE_TEST_MODE:
2303                        if (xudc->gadget.speed != USB_SPEED_HIGH)
2304                                return -EINVAL;
2305
2306                        if (!set)
2307                                return -EINVAL;
2308
2309                        xudc->test_mode_pattern = index >> 8;
2310                        break;
2311                default:
2312                        return -EINVAL;
2313                }
2314
2315                break;
2316        case USB_RECIP_INTERFACE:
2317                if (xudc->device_state != USB_STATE_CONFIGURED)
2318                        return -EINVAL;
2319
2320                switch (feature) {
2321                case USB_INTRF_FUNC_SUSPEND:
2322                        if (set) {
2323                                val = xudc_readl(xudc, PORTPM);
2324
2325                                if (index & USB_INTRF_FUNC_SUSPEND_RW)
2326                                        val |= PORTPM_FRWE;
2327                                else
2328                                        val &= ~PORTPM_FRWE;
2329
2330                                xudc_writel(xudc, val, PORTPM);
2331                        }
2332
2333                        return tegra_xudc_ep0_delegate_req(xudc, ctrl);
2334                default:
2335                        return -EINVAL;
2336                }
2337
2338                break;
2339        case USB_RECIP_ENDPOINT:
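                /*
                 * Endpoint contexts are indexed as number * 2, +1 for IN;
                 * e.g. EP1 OUT is index 2 and EP1 IN is index 3.
                 */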
2340                ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
2341                        ((index & USB_DIR_IN) ? 1 : 0);
2342
2343                if ((xudc->device_state == USB_STATE_DEFAULT) ||
2344                    ((xudc->device_state == USB_STATE_ADDRESS) &&
2345                     (index != 0)))
2346                        return -EINVAL;
2347
2348                ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
2349                if (ret < 0)
2350                        return ret;
2351                break;
2352        default:
2353                return -EINVAL;
2354        }
2355
2356        return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
2357}
2358
2359static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
2360                                     struct usb_ctrlrequest *ctrl)
2361{
2362        struct tegra_xudc_ep_context *ep_ctx;
2363        u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
2364        u16 status = 0;
2365
2366        if (!(ctrl->bRequestType & USB_DIR_IN))
2367                return -EINVAL;
2368
2369        if ((le16_to_cpu(ctrl->wValue) != 0) ||
2370            (le16_to_cpu(ctrl->wLength) != 2))
2371                return -EINVAL;
2372
2373        switch (ctrl->bRequestType & USB_RECIP_MASK) {
2374        case USB_RECIP_DEVICE:
2375                val = xudc_readl(xudc, PORTPM);
2376
2377                if (xudc->selfpowered)
2378                        status |= BIT(USB_DEVICE_SELF_POWERED);
2379
2380                if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
2381                    (val & PORTPM_RWE))
2382                        status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
2383
2384                if (xudc->gadget.speed == USB_SPEED_SUPER) {
2385                        if (val & PORTPM_U1E)
2386                                status |= BIT(USB_DEV_STAT_U1_ENABLED);
2387                        if (val & PORTPM_U2E)
2388                                status |= BIT(USB_DEV_STAT_U2_ENABLED);
2389                }
2390                break;
2391        case USB_RECIP_INTERFACE:
2392                if (xudc->gadget.speed == USB_SPEED_SUPER) {
2393                        status |= USB_INTRF_STAT_FUNC_RW_CAP;
2394                        val = xudc_readl(xudc, PORTPM);
2395                        if (val & PORTPM_FRWE)
2396                                status |= USB_INTRF_STAT_FUNC_RW;
2397                }
2398                break;
2399        case USB_RECIP_ENDPOINT:
2400                ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
2401                        ((index & USB_DIR_IN) ? 1 : 0);
2402                ep_ctx = &xudc->ep_context[ep];
2403
2404                if ((xudc->device_state != USB_STATE_CONFIGURED) &&
2405                    ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
2406                        return -EINVAL;
2407
2408                if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
2409                        return -EINVAL;
2410
2411                if (xudc_readl(xudc, EP_HALT) & BIT(ep))
2412                        status |= BIT(USB_ENDPOINT_HALT);
2413                break;
2414        default:
2415                return -EINVAL;
2416        }
2417
2418        xudc->status_buf = cpu_to_le16(status);
2419        return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
2420                                         sizeof(xudc->status_buf),
2421                                         no_op_complete);
2422}
2423
2424static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
2425{
2426        /* Nothing further needs to be done with the SEL values */
2427}
2428
2429static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
2430                                  struct usb_ctrlrequest *ctrl)
2431{
2432        if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
2433                                     USB_TYPE_STANDARD))
2434                return -EINVAL;
2435
2436        if (xudc->device_state == USB_STATE_DEFAULT)
2437                return -EINVAL;
2438
2439        if ((le16_to_cpu(ctrl->wIndex) != 0) ||
2440            (le16_to_cpu(ctrl->wValue) != 0) ||
2441            (le16_to_cpu(ctrl->wLength) != 6))
2442                return -EINVAL;
2443
2444        return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
2445                                         sizeof(xudc->sel_timing),
2446                                         set_sel_complete);
2447}
2448
2449static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
2450{
2451        /* Nothing further needs to be done with the isoch delay */
2452}
2453
2454static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
2455                                          struct usb_ctrlrequest *ctrl)
2456{
2457        u32 delay = le16_to_cpu(ctrl->wValue);
2458
2459        if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
2460                                   USB_TYPE_STANDARD))
2461                return -EINVAL;
2462
2463        if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
2464            (le16_to_cpu(ctrl->wLength) != 0))
2465                return -EINVAL;
2466
2467        xudc->isoch_delay = delay;
2468
2469        return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
2470}
2471
2472static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
2473{
2474        struct tegra_xudc *xudc = req->context;
2475
2476        if ((xudc->device_state == USB_STATE_DEFAULT) &&
2477            (xudc->dev_addr != 0)) {
2478                xudc->device_state = USB_STATE_ADDRESS;
2479                usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2480        } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
2481                   (xudc->dev_addr == 0)) {
2482                xudc->device_state = USB_STATE_DEFAULT;
2483                usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2484        }
2485}
2486
2487static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
2488                                      struct usb_ctrlrequest *ctrl)
2489{
2490        struct tegra_xudc_ep *ep0 = &xudc->ep[0];
2491        u32 val, addr = le16_to_cpu(ctrl->wValue);
2492
2493        if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
2494                                     USB_TYPE_STANDARD))
2495                return -EINVAL;
2496
2497        if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
2498            (le16_to_cpu(ctrl->wLength) != 0))
2499                return -EINVAL;
2500
2501        if (xudc->device_state == USB_STATE_CONFIGURED)
2502                return -EINVAL;
2503
2504        dev_dbg(xudc->dev, "set address: %u\n", addr);
2505
2506        xudc->dev_addr = addr;
2507        val = xudc_readl(xudc, CTRL);
2508        val &= ~(CTRL_DEVADDR_MASK);
2509        val |= CTRL_DEVADDR(addr);
2510        xudc_writel(xudc, val, CTRL);
2511
2512        ep_ctx_write_devaddr(ep0->context, addr);
2513
2514        return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
2515}
2516
2517static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
2518                                      struct usb_ctrlrequest *ctrl)
2519{
2520        int ret;
2521
2522        switch (ctrl->bRequest) {
2523        case USB_REQ_GET_STATUS:
2524                dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
2525                ret = tegra_xudc_ep0_get_status(xudc, ctrl);
2526                break;
2527        case USB_REQ_SET_ADDRESS:
2528                dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
2529                ret = tegra_xudc_ep0_set_address(xudc, ctrl);
2530                break;
2531        case USB_REQ_SET_SEL:
2532                dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
2533                ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
2534                break;
2535        case USB_REQ_SET_ISOCH_DELAY:
2536                dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
2537                ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
2538                break;
2539        case USB_REQ_CLEAR_FEATURE:
2540        case USB_REQ_SET_FEATURE:
2541                dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
2542                ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
2543                break;
2544        case USB_REQ_SET_CONFIGURATION:
2545                dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
2546                /*
2547                 * In theory the RUN bit should be cleared before the status
2548                 * stage of a deconfigure request is sent, but doing so causes
2549                 * problems.  Clear RUN once all endpoints are disabled instead.
2550                 */
2551                fallthrough;
2552        default:
2553                ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
2554                break;
2555        }
2556
2557        return ret;
2558}
2559
2560static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
2561                                               struct usb_ctrlrequest *ctrl,
2562                                               u16 seq_num)
2563{
2564        int ret;
2565
2566        xudc->setup_seq_num = seq_num;
2567
2568        /* Ensure EP0 is unhalted. */
2569        ep_unhalt(xudc, 0);
2570
2571        /*
2572         * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
2573         * are invalid.  Halt EP0 until we get a valid packet.
2574         */
2575        if (xudc->soc->invalid_seq_num &&
2576            (seq_num == 0xfffe || seq_num == 0xffff)) {
2577                dev_warn(xudc->dev, "invalid sequence number detected\n");
2578                ep_halt(xudc, 0);
2579                return;
2580        }
2581
2582        if (ctrl->wLength)
2583                xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
2584                        DATA_STAGE_XFER : DATA_STAGE_RECV;
2585        else
2586                xudc->setup_state = STATUS_STAGE_XFER;
2587
2588        if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
2589                ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
2590        else
2591                ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
2592
2593        if (ret < 0) {
2594                dev_warn(xudc->dev, "setup request failed: %d\n", ret);
2595                xudc->setup_state = WAIT_FOR_SETUP;
2596                ep_halt(xudc, 0);
2597        }
2598}
2599
2600static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
2601                                        struct tegra_xudc_trb *event)
2602{
2603        struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
2604        u16 seq_num = trb_read_seq_num(event);
2605
2606        if (xudc->setup_state != WAIT_FOR_SETUP) {
2607                /*
2608                 * The controller is in the process of handling another
2609                 * setup request.  Queue subsequent requests and handle
2610                 * the last one once the controller reports a sequence
2611                 * number error.
2612                 */
2613                memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
2614                xudc->setup_packet.seq_num = seq_num;
2615                xudc->queued_setup_packet = true;
2616        } else {
2617                tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
2618        }
2619}
2620
2621static struct tegra_xudc_request *
2622trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
2623{
2624        struct tegra_xudc_request *req;
2625
2626        list_for_each_entry(req, &ep->queue, list) {
2627                if (!req->trbs_queued)
2628                        break;
2629
2630                if (trb_in_request(ep, req, trb))
2631                        return req;
2632        }
2633
2634        return NULL;
2635}
2636
2637static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
2638                                                  struct tegra_xudc_ep *ep,
2639                                                  struct tegra_xudc_trb *event)
2640{
2641        struct tegra_xudc_request *req;
2642        struct tegra_xudc_trb *trb;
2643        bool short_packet;
2644
2645        short_packet = (trb_read_cmpl_code(event) ==
2646                        TRB_CMPL_CODE_SHORT_PACKET);
2647
2648        trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2649        req = trb_to_request(ep, trb);
2650
2651        /*
2652         * TDs are complete on short packet or when the completed TRB is the
2653         * last TRB in the TD (the CHAIN bit is unset).
2654         */
2655        if (req && (short_packet || (!trb_read_chain(trb) &&
2656                (req->trbs_needed == req->trbs_queued)))) {
2657                struct tegra_xudc_trb *last = req->last_trb;
2658                unsigned int residual;
2659
2660                residual = trb_read_transfer_len(event);
2661                req->usb_req.actual = req->usb_req.length - residual;
2662
2663                dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
2664                        req->usb_req.actual, req->usb_req.length);
2665
2666                tegra_xudc_req_done(ep, req, 0);
2667
2668                if (ep->desc && usb_endpoint_xfer_control(ep->desc))
2669                        tegra_xudc_ep0_req_done(xudc);
2670
2671                /*
2672                 * Advance the dequeue pointer past the end of the current TD
2673                 * on short packet completion.
2674                 */
2675                if (short_packet) {
2676                        ep->deq_ptr = (last - ep->transfer_ring) + 1;
2677                        if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
2678                                ep->deq_ptr = 0;
2679                }
2680        } else if (!req) {
2681                dev_warn(xudc->dev, "transfer event on dequeued request\n");
2682        }
2683
2684        if (ep->desc)
2685                tegra_xudc_ep_kick_queue(ep);
2686}
2687
2688static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
2689                                             struct tegra_xudc_trb *event)
2690{
2691        unsigned int ep_index = trb_read_endpoint_id(event);
2692        struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
2693        struct tegra_xudc_trb *trb;
2694        u16 comp_code;
2695
2696        if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
2697                dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
2698                         ep_index);
2699                return;
2700        }
2701
2702        /* Update transfer ring dequeue pointer. */
2703        trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2704        comp_code = trb_read_cmpl_code(event);
2705        if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
2706                ep->deq_ptr = (trb - ep->transfer_ring) + 1;
2707
2708                if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
2709                        ep->deq_ptr = 0;
2710                ep->ring_full = false;
2711        }
2712
2713        switch (comp_code) {
2714        case TRB_CMPL_CODE_SUCCESS:
2715        case TRB_CMPL_CODE_SHORT_PACKET:
2716                tegra_xudc_handle_transfer_completion(xudc, ep, event);
2717                break;
2718        case TRB_CMPL_CODE_HOST_REJECTED:
2719                dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);
2720
2721                ep->stream_rejected = true;
2722                break;
2723        case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
2724                dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);
2725
2726                if (ep->stream_rejected) {
2727                        ep->stream_rejected = false;
2728                        /*
2729                         * An EP is stopped when a stream is rejected.  Wait
2730                         * for the EP to report that it is stopped and then
2731                         * un-stop it.
2732                         */
2733                        ep_wait_for_stopped(xudc, ep_index);
2734                }
2735                tegra_xudc_ep_ring_doorbell(ep);
2736                break;
2737        case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
2738                /*
2739                 * Wait for the EP to be stopped so the controller stops
2740                 * processing doorbells.
2741                 */
2742                ep_wait_for_stopped(xudc, ep_index);
2743                ep->enq_ptr = ep->deq_ptr;
2744                tegra_xudc_ep_nuke(ep, -EIO);
2745                fallthrough;
2746        case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
2747        case TRB_CMPL_CODE_CTRL_DIR_ERR:
2748        case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
2749        case TRB_CMPL_CODE_RING_UNDERRUN:
2750        case TRB_CMPL_CODE_RING_OVERRUN:
2751        case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
2752        case TRB_CMPL_CODE_USB_TRANS_ERR:
2753        case TRB_CMPL_CODE_TRB_ERR:
2754                dev_err(xudc->dev, "completion error %#x on EP %u\n",
2755                        comp_code, ep_index);
2756
2757                ep_halt(xudc, ep_index);
2758                break;
2759        case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
2760                dev_info(xudc->dev, "sequence number error\n");
2761
2762                /*
2763                 * Kill any queued control request and skip to the last
2764                 * setup packet we received.
2765                 */
2766                tegra_xudc_ep_nuke(ep, -EINVAL);
2767                xudc->setup_state = WAIT_FOR_SETUP;
2768                if (!xudc->queued_setup_packet)
2769                        break;
2770
2771                tegra_xudc_handle_ep0_setup_packet(xudc,
2772                                                   &xudc->setup_packet.ctrl_req,
2773                                                   xudc->setup_packet.seq_num);
2774                xudc->queued_setup_packet = false;
2775                break;
2776        case TRB_CMPL_CODE_STOPPED:
2777                dev_dbg(xudc->dev, "stop completion code on EP %u\n",
2778                        ep_index);
2779
2780                /* Disconnected. */
2781                tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
2782                break;
2783        default:
2784                dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
2785                        comp_code, ep_index);
2786                break;
2787        }
2788}
2789
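/*
 * Reset software state after a bus reset or disconnect: fail all
 * outstanding requests, flush the EP0 transfer ring by resetting its
 * dequeue pointer and sequence number, and re-arm endpoint 0.
 */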
2790static void tegra_xudc_reset(struct tegra_xudc *xudc)
2791{
2792        struct tegra_xudc_ep *ep0 = &xudc->ep[0];
2793        dma_addr_t deq_ptr;
2794        unsigned int i;
2795
2796        xudc->setup_state = WAIT_FOR_SETUP;
2797        xudc->device_state = USB_STATE_DEFAULT;
2798        usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2799
2800        ep_unpause_all(xudc);
2801
2802        for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
2803                tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);
2804
2805        /*
2806         * Reset sequence number and dequeue pointer to flush the transfer
2807         * ring.
2808         */
2809        ep0->deq_ptr = ep0->enq_ptr;
2810        ep0->ring_full = false;
2811
2812        xudc->setup_seq_num = 0;
2813        xudc->queued_setup_packet = false;
2814
2815        ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
2816
2817        deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
2818
2819        if (!dma_mapping_error(xudc->dev, deq_ptr)) {
2820                ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
2821                ep_ctx_write_dcs(ep0->context, ep0->pcs);
2822        }
2823
2824        ep_unhalt_all(xudc);
2825        ep_reload(xudc, 0);
2826        ep_unpause(xudc, 0);
2827}
2828
2829static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
2830{
2831        struct tegra_xudc_ep *ep0 = &xudc->ep[0];
2832        u16 maxpacket;
2833        u32 val;
2834
2835        val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
2836        switch (val) {
2837        case PORTSC_PS_LS:
2838                xudc->gadget.speed = USB_SPEED_LOW;
2839                break;
2840        case PORTSC_PS_FS:
2841                xudc->gadget.speed = USB_SPEED_FULL;
2842                break;
2843        case PORTSC_PS_HS:
2844                xudc->gadget.speed = USB_SPEED_HIGH;
2845                break;
2846        case PORTSC_PS_SS:
2847                xudc->gadget.speed = USB_SPEED_SUPER;
2848                break;
2849        default:
2850                xudc->gadget.speed = USB_SPEED_UNKNOWN;
2851                break;
2852        }
2853
2854        xudc->device_state = USB_STATE_DEFAULT;
2855        usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2856
2857        xudc->setup_state = WAIT_FOR_SETUP;
2858
2859        if (xudc->gadget.speed == USB_SPEED_SUPER)
2860                maxpacket = 512;
2861        else
2862                maxpacket = 64;
2863
2864        ep_ctx_write_max_packet_size(ep0->context, maxpacket);
2865        tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
2866        usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);
2867
2868        if (!xudc->soc->u1_enable) {
2869                val = xudc_readl(xudc, PORTPM);
2870                val &= ~(PORTPM_U1TIMEOUT_MASK);
2871                xudc_writel(xudc, val, PORTPM);
2872        }
2873
2874        if (!xudc->soc->u2_enable) {
2875                val = xudc_readl(xudc, PORTPM);
2876                val &= ~(PORTPM_U2TIMEOUT_MASK);
2877                xudc_writel(xudc, val, PORTPM);
2878        }
2879
2880        if (xudc->gadget.speed <= USB_SPEED_HIGH) {
2881                val = xudc_readl(xudc, PORTPM);
2882                val &= ~(PORTPM_L1S_MASK);
2883                if (xudc->soc->lpm_enable)
2884                        val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
2885                else
2886                        val |= PORTPM_L1S(PORTPM_L1S_NYET);
2887                xudc_writel(xudc, val, PORTPM);
2888        }
2889
2890        val = xudc_readl(xudc, ST);
2891        if (val & ST_RC)
2892                xudc_writel(xudc, ST_RC, ST);
2893}
2894
2895static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
2896{
2897        tegra_xudc_reset(xudc);
2898
2899        if (xudc->driver && xudc->driver->disconnect) {
2900                spin_unlock(&xudc->lock);
2901                xudc->driver->disconnect(&xudc->gadget);
2902                spin_lock(&xudc->lock);
2903        }
2904
2905        xudc->device_state = USB_STATE_NOTATTACHED;
2906        usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2907
2908        complete(&xudc->disconnect_complete);
2909}
2910
2911static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
2912{
2913        tegra_xudc_reset(xudc);
2914
2915        if (xudc->driver) {
2916                spin_unlock(&xudc->lock);
2917                usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
2918                spin_lock(&xudc->lock);
2919        }
2920
2921        tegra_xudc_port_connect(xudc);
2922}
2923
2924static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
2925{
2926        dev_dbg(xudc->dev, "port suspend\n");
2927
2928        xudc->resume_state = xudc->device_state;
2929        xudc->device_state = USB_STATE_SUSPENDED;
2930        usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2931
2932        if (xudc->driver->suspend) {
2933                spin_unlock(&xudc->lock);
2934                xudc->driver->suspend(&xudc->gadget);
2935                spin_lock(&xudc->lock);
2936        }
2937}
2938
2939static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
2940{
2941        dev_dbg(xudc->dev, "port resume\n");
2942
2943        tegra_xudc_resume_device_state(xudc);
2944
2945        if (xudc->driver->resume) {
2946                spin_unlock(&xudc->lock);
2947                xudc->driver->resume(&xudc->gadget);
2948                spin_lock(&xudc->lock);
2949        }
2950}
2951
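/*
 * The PORTSC change bits are write-1-to-clear, so mask all of them out
 * of the read-modify-write and set only the bit being acknowledged;
 * otherwise an unrelated, not-yet-handled change bit would be cleared
 * by accident.
 */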
2952static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
2953{
2954        u32 val;
2955
2956        val = xudc_readl(xudc, PORTSC);
2957        val &= ~PORTSC_CHANGE_MASK;
2958        val |= flag;
2959        xudc_writel(xudc, val, PORTSC);
2960}
2961
2962static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
2963{
2964        u32 portsc, porthalt;
2965
2966        porthalt = xudc_readl(xudc, PORTHALT);
2967        if ((porthalt & PORTHALT_STCHG_REQ) &&
2968            (porthalt & PORTHALT_HALT_LTSSM)) {
2969                dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
2970                porthalt &= ~PORTHALT_HALT_LTSSM;
2971                xudc_writel(xudc, porthalt, PORTHALT);
2972        }
2973
2974        portsc = xudc_readl(xudc, PORTSC);
2975        if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
2976                dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
2977                clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
2978#define TOGGLE_VBUS_WAIT_MS 100
2979                if (xudc->soc->port_reset_quirk) {
2980                        schedule_delayed_work(&xudc->port_reset_war_work,
2981                                msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
2982                        xudc->wait_for_sec_prc = 1;
2983                }
2984        }
2985
2986        if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
2987                dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
2988                clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
2989                tegra_xudc_port_reset(xudc);
2990                cancel_delayed_work(&xudc->port_reset_war_work);
2991                xudc->wait_for_sec_prc = 0;
2992        }
2993
2994        portsc = xudc_readl(xudc, PORTSC);
2995        if (portsc & PORTSC_WRC) {
2996                dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
2997                clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
2998                if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
2999                        tegra_xudc_port_reset(xudc);
3000        }
3001
3002        portsc = xudc_readl(xudc, PORTSC);
3003        if (portsc & PORTSC_CSC) {
3004                dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
3005                clear_port_change(xudc, PORTSC_CSC);
3006
3007                if (portsc & PORTSC_CCS)
3008                        tegra_xudc_port_connect(xudc);
3009                else
3010                        tegra_xudc_port_disconnect(xudc);
3011
3012                if (xudc->wait_csc) {
3013                        cancel_delayed_work(&xudc->plc_reset_work);
3014                        xudc->wait_csc = false;
3015                }
3016        }
3017
3018        portsc = xudc_readl(xudc, PORTSC);
3019        if (portsc & PORTSC_PLC) {
3020                u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;
3021
3022                dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
3023                clear_port_change(xudc, PORTSC_PLC);
3024                switch (pls) {
3025                case PORTSC_PLS_U3:
3026                        tegra_xudc_port_suspend(xudc);
3027                        break;
3028                case PORTSC_PLS_U0:
3029                        if (xudc->gadget.speed < USB_SPEED_SUPER)
3030                                tegra_xudc_port_resume(xudc);
3031                        break;
3032                case PORTSC_PLS_RESUME:
3033                        if (xudc->gadget.speed == USB_SPEED_SUPER)
3034                                tegra_xudc_port_resume(xudc);
3035                        break;
3036                case PORTSC_PLS_INACTIVE:
3037                        schedule_delayed_work(&xudc->plc_reset_work,
3038                                        msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
3039                        xudc->wait_csc = true;
3040                        break;
3041                default:
3042                        break;
3043                }
3044        }
3045
3046        if (portsc & PORTSC_CEC) {
3047                dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
3048                clear_port_change(xudc, PORTSC_CEC);
3049        }
3050
3051        dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
3052}
3053
3054static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
3055{
3056        while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
3057               (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
3058                __tegra_xudc_handle_port_status(xudc);
3059}
3060
3061static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
3062                                    struct tegra_xudc_trb *event)
3063{
3064        u32 type = trb_read_type(event);
3065
3066        dump_trb(xudc, "EVENT", event);
3067
3068        switch (type) {
3069        case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
3070                tegra_xudc_handle_port_status(xudc);
3071                break;
3072        case TRB_TYPE_TRANSFER_EVENT:
3073                tegra_xudc_handle_transfer_event(xudc, event);
3074                break;
3075        case TRB_TYPE_SETUP_PACKET_EVENT:
3076                tegra_xudc_handle_ep0_event(xudc, event);
3077                break;
3078        default:
3079                dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
3080                break;
3081        }
3082}
3083
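    /*
     * Drain the event ring: an event TRB belongs to software as long as
     * its cycle bit matches our consumer cycle state (ccs), which is
     * toggled each time the dequeue pointer wraps past the last segment.
     * The new dequeue pointer is then written to ERDPHI/ERDPLO with
     * ERDPLO_EHB set to clear the Event Handler Busy flag
     * (write-one-to-clear, as in XHCI).
     */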
3084static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
3085{
3086        struct tegra_xudc_trb *event;
3087        dma_addr_t erdp;
3088
3089        while (true) {
3090                event = xudc->event_ring[xudc->event_ring_index] +
3091                        xudc->event_ring_deq_ptr;
3092
3093                if (trb_read_cycle(event) != xudc->ccs)
3094                        break;
3095
3096                tegra_xudc_handle_event(xudc, event);
3097
3098                xudc->event_ring_deq_ptr++;
3099                if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
3100                        xudc->event_ring_deq_ptr = 0;
3101                        xudc->event_ring_index++;
3102                }
3103
3104                if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
3105                        xudc->event_ring_index = 0;
3106                        xudc->ccs = !xudc->ccs;
3107                }
3108        }
3109
3110        erdp = xudc->event_ring_phys[xudc->event_ring_index] +
3111                xudc->event_ring_deq_ptr * sizeof(*event);
3112
3113        xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
3114        xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
3115}
3116
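    /*
     * Top-level interrupt handler: the ST_IP interrupt-pending bit is
     * acknowledged by writing it back (write-one-to-clear), then pending
     * events are drained under xudc->lock.
     */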
3117static irqreturn_t tegra_xudc_irq(int irq, void *data)
3118{
3119        struct tegra_xudc *xudc = data;
3120        unsigned long flags;
3121        u32 val;
3122
3123        val = xudc_readl(xudc, ST);
3124        if (!(val & ST_IP))
3125                return IRQ_NONE;
3126        xudc_writel(xudc, ST_IP, ST);
3127
3128        spin_lock_irqsave(&xudc->lock, flags);
3129        tegra_xudc_process_event_ring(xudc);
3130        spin_unlock_irqrestore(&xudc->lock, flags);
3131
3132        return IRQ_HANDLED;
3133}
3134
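    /*
     * Endpoints are indexed as in XHCI device contexts: index 0 is the
     * bi-directional control endpoint, even indices are OUT endpoints
     * and odd indices are IN (index = endpoint number * 2 + direction),
     * which is why index 1 is unused.
     */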
3135static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
3136{
3137        struct tegra_xudc_ep *ep = &xudc->ep[index];
3138
3139        ep->xudc = xudc;
3140        ep->index = index;
3141        ep->context = &xudc->ep_context[index];
3142        INIT_LIST_HEAD(&ep->queue);
3143
3144        /*
3145         * EP1 would be the input endpoint corresponding to EP0, but since
3146         * EP0 is bi-directional, EP1 is unused.
3147         */
3148        if (index == 1)
3149                return 0;
3150
3151        ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
3152                                           GFP_KERNEL,
3153                                           &ep->transfer_ring_phys);
3154        if (!ep->transfer_ring)
3155                return -ENOMEM;
3156
3157        if (index) {
3158                snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
3159                         (index % 2 == 0) ? "out" : "in");
3160                ep->usb_ep.name = ep->name;
3161                usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
3162                ep->usb_ep.max_streams = 16;
3163                ep->usb_ep.ops = &tegra_xudc_ep_ops;
3164                ep->usb_ep.caps.type_bulk = true;
3165                ep->usb_ep.caps.type_int = true;
3166                if (index & 1)
3167                        ep->usb_ep.caps.dir_in = true;
3168                else
3169                        ep->usb_ep.caps.dir_out = true;
3170                list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
3171        } else {
3172                strscpy(ep->name, "ep0", sizeof(ep->name));
3173                ep->usb_ep.name = ep->name;
3174                usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
3175                ep->usb_ep.ops = &tegra_xudc_ep0_ops;
3176                ep->usb_ep.caps.type_control = true;
3177                ep->usb_ep.caps.dir_in = true;
3178                ep->usb_ep.caps.dir_out = true;
3179        }
3180
3181        return 0;
3182}
3183
3184static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
3185{
3186        struct tegra_xudc_ep *ep = &xudc->ep[index];
3187
3188        /*
3189         * EP1 would be the input endpoint corresponding to EP0, but since
3190         * EP0 is bi-directional, EP1 is unused.
3191         */
3192        if (index == 1)
3193                return;
3194
3195        dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
3196                      ep->transfer_ring_phys);
3197}
3198
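    /*
     * Allocate the endpoint context array shared with the controller, a
     * DMA pool for the per-endpoint transfer rings, and a usb_request
     * reserved for ep0 control transfers.
     */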
3199static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
3200{
3201        struct usb_request *req;
3202        unsigned int i;
3203        int err;
3204
3205        xudc->ep_context =
3206                dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
3207                                    sizeof(*xudc->ep_context),
3208                                    &xudc->ep_context_phys, GFP_KERNEL);
3209        if (!xudc->ep_context)
3210                return -ENOMEM;
3211
3212        xudc->transfer_ring_pool =
3213                dmam_pool_create(dev_name(xudc->dev), xudc->dev,
3214                                 XUDC_TRANSFER_RING_SIZE *
3215                                 sizeof(struct tegra_xudc_trb),
3216                                 sizeof(struct tegra_xudc_trb), 0);
3217        if (!xudc->transfer_ring_pool) {
3218                err = -ENOMEM;
3219                goto free_ep_context;
3220        }
3221
3222        INIT_LIST_HEAD(&xudc->gadget.ep_list);
3223        for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
3224                err = tegra_xudc_alloc_ep(xudc, i);
3225                if (err < 0)
3226                        goto free_eps;
3227        }
3228
3229        req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
3230        if (!req) {
3231                err = -ENOMEM;
3232                goto free_eps;
3233        }
3234        xudc->ep0_req = to_xudc_req(req);
3235
3236        return 0;
3237
3238free_eps:
3239        for (; i > 0; i--)
3240                tegra_xudc_free_ep(xudc, i - 1);
3241free_ep_context:
3242        dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
3243                          xudc->ep_context, xudc->ep_context_phys);
3244        return err;
3245}
3246
3247static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
3248{
3249        xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
3250        xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
3251}
3252
3253static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
3254{
3255        unsigned int i;
3256
3257        tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
3258                                   &xudc->ep0_req->usb_req);
3259
3260        for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
3261                tegra_xudc_free_ep(xudc, i);
3262
3263        dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
3264                          xudc->ep_context, xudc->ep_context_phys);
3265}
3266
3267static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
3268{
3269        unsigned int i;
3270
3271        for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
3272                xudc->event_ring[i] =
3273                        dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
3274                                           sizeof(*xudc->event_ring[i]),
3275                                           &xudc->event_ring_phys[i],
3276                                           GFP_KERNEL);
3277                if (!xudc->event_ring[i])
3278                        goto free_dma;
3279        }
3280
3281        return 0;
3282
3283free_dma:
3284        for (; i > 0; i--) {
3285                dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
3286                                  sizeof(*xudc->event_ring[i - 1]),
3287                                  xudc->event_ring[i - 1],
3288                                  xudc->event_ring_phys[i - 1]);
3289        }
3290        return -ENOMEM;
3291}
3292
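    /*
     * Program the event ring segment table (per-segment size and base
     * address), then reset the hardware dequeue (ERDP) and enqueue
     * (EREP, with the initial cycle state) pointers and our own dequeue
     * state to the start of the first segment.
     */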
3293static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
3294{
3295        unsigned int i;
3296        u32 val;
3297
3298        val = xudc_readl(xudc, SPARAM);
3299        val &= ~(SPARAM_ERSTMAX_MASK);
3300        val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
3301        xudc_writel(xudc, val, SPARAM);
3302
3303        for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
3304                memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
3305                       sizeof(*xudc->event_ring[i]));
3306
3307                val = xudc_readl(xudc, ERSTSZ);
3308                val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
3309                val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
3310                xudc_writel(xudc, val, ERSTSZ);
3311
3312                xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
3313                            ERSTXBALO(i));
3314                xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
3315                            ERSTXBAHI(i));
3316        }
3317
3318        val = lower_32_bits(xudc->event_ring_phys[0]);
3319        xudc_writel(xudc, val, ERDPLO);
3320        val |= EREPLO_ECS;
3321        xudc_writel(xudc, val, EREPLO);
3322
3323        val = upper_32_bits(xudc->event_ring_phys[0]);
3324        xudc_writel(xudc, val, ERDPHI);
3325        xudc_writel(xudc, val, EREPHI);
3326
3327        xudc->ccs = true;
3328        xudc->event_ring_index = 0;
3329        xudc->event_ring_deq_ptr = 0;
3330}
3331
3332static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
3333{
3334        unsigned int i;
3335
3336        for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
3337                dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
3338                                  sizeof(*xudc->event_ring[i]),
3339                                  xudc->event_ring[i],
3340                                  xudc->event_ring_phys[i]);
3341        }
3342}
3343
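    /*
     * Bring up the FPCI/IPFS wrapper around the controller: enable the
     * FPCI interface (on SoCs with an IPFS block), enable memory, I/O
     * and bus-master access, and program BAR0 with the physical base of
     * the XUSB_DEV register aperture.
     */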
3344static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
3345{
3346        u32 val;
3347
3348        if (xudc->soc->has_ipfs) {
3349                val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
3350                val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
3351                ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
3352                usleep_range(10, 15);
3353        }
3354
3355        /* Enable bus master */
3356        val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
3357                XUSB_DEV_CFG_1_BUS_MASTER_EN;
3358        fpci_writel(xudc, val, XUSB_DEV_CFG_1);
3359
3360        /* Program BAR0 space */
3361        val = fpci_readl(xudc, XUSB_DEV_CFG_4);
3362        val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
3363        val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);
3364
3365        fpci_writel(xudc, val, XUSB_DEV_CFG_4);
3366        fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);
3367
3368        usleep_range(100, 200);
3369
3370        if (xudc->soc->has_ipfs) {
3371                /* Enable interrupt assertion */
3372                val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
3373                val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
3374                ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
3375        }
3376}
3377
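    /*
     * One-time hardware parameter setup: block-level clock gating,
     * various link timer adjustments, directing first the HS/FS and then
     * the SS port instance to RxDetect, and interrupt moderation.
     */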
3378static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
3379{
3380        u32 val, imod;
3381
3382        if (xudc->soc->has_ipfs) {
3383                val = xudc_readl(xudc, BLCG);
3384                val |= BLCG_ALL;
3385                val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
3386                                BLCG_COREPLL_PWRDN);
3387                val |= BLCG_IOPLL_0_PWRDN;
3388                val |= BLCG_IOPLL_1_PWRDN;
3389                val |= BLCG_IOPLL_2_PWRDN;
3390
3391                xudc_writel(xudc, val, BLCG);
3392        }
3393
3394        if (xudc->soc->port_speed_quirk)
3395                tegra_xudc_limit_port_speed(xudc);
3396
3397        /* Set a reasonable U3 exit timer value. */
3398        val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
3399        val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
3400        val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
3401        xudc_writel(xudc, val, SSPX_CORE_PADCTL4);
3402
3403        /* Default ping LFPS tBurst is too large. */
3404        val = xudc_readl(xudc, SSPX_CORE_CNT0);
3405        val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
3406        val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
3407        xudc_writel(xudc, val, SSPX_CORE_CNT0);
3408
3409        /* Default tPortConfiguration timeout is too small. */
3410        val = xudc_readl(xudc, SSPX_CORE_CNT30);
3411        val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
3412        val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
3413        xudc_writel(xudc, val, SSPX_CORE_CNT30);
3414
3415        if (xudc->soc->lpm_enable) {
3416                /* Set L1 resume duration to 95 us. */
3417                val = xudc_readl(xudc, HSFSPI_COUNT13);
3418                val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
3419                val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
3420                xudc_writel(xudc, val, HSFSPI_COUNT13);
3421        }
3422
3423        /*
3424         * The compliance suite appears to violate the polling LFPS tBurst
3425         * maximum of 1.4us. Send 1.45us instead.
3426         */
3427        val = xudc_readl(xudc, SSPX_CORE_CNT32);
3428        val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
3429        val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
3430        xudc_writel(xudc, val, SSPX_CORE_CNT32);
3431
3432        /* Direct HS/FS port instance to RxDetect. */
3433        val = xudc_readl(xudc, CFG_DEV_FE);
3434        val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
3435        val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
3436        xudc_writel(xudc, val, CFG_DEV_FE);
3437
3438        val = xudc_readl(xudc, PORTSC);
3439        val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
3440        val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
3441        xudc_writel(xudc, val, PORTSC);
3442
3443        /* Direct SS port instance to RxDetect. */
3444        val = xudc_readl(xudc, CFG_DEV_FE);
3445        val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
3446        val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
3447        xudc_writel(xudc, val, CFG_DEV_FE);
3448
3449        val = xudc_readl(xudc, PORTSC);
3450        val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
3451        val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
3452        xudc_writel(xudc, val, PORTSC);
3453
3454        /* Restore port instance. */
3455        val = xudc_readl(xudc, CFG_DEV_FE);
3456        val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
3457        xudc_writel(xudc, val, CFG_DEV_FE);
3458
3459        /*
3460         * Enable INFINITE_SS_RETRY to prevent device from entering
3461         * Disabled.Error when attached to buggy SuperSpeed hubs.
3462         */
3463        val = xudc_readl(xudc, CFG_DEV_FE);
3464        val |= CFG_DEV_FE_INFINITE_SS_RETRY;
3465        xudc_writel(xudc, val, CFG_DEV_FE);
3466
3467        /* Set interrupt moderation. */
3468        imod = XUDC_INTERRUPT_MODERATION_US * 4;
3469        val = xudc_readl(xudc, RT_IMOD);
3470        val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
3471        val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
3472        xudc_writel(xudc, val, RT_IMOD);
3473
3474        /* Increase SSPI transaction timeout from 32us to 512us. */
3475        val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
3476        val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
3477        val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
3478        xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
3479}
3480
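    /*
     * Look up the per-port PHYs. All of them are optional: a port
     * without a UTMI PHY is skipped entirely, and a USB3 PHY is only
     * looked up for ports that have a SuperSpeed companion.
     */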
3481static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
3482{
3483        int err = 0, usb3;
3484        unsigned int i;
3485
3486        xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
3487                                           sizeof(*xudc->utmi_phy), GFP_KERNEL);
3488        if (!xudc->utmi_phy)
3489                return -ENOMEM;
3490
3491        xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
3492                                           sizeof(*xudc->usb3_phy), GFP_KERNEL);
3493        if (!xudc->usb3_phy)
3494                return -ENOMEM;
3495
3496        xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
3497                                           sizeof(*xudc->usbphy), GFP_KERNEL);
3498        if (!xudc->usbphy)
3499                return -ENOMEM;
3500
3501        xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;
3502
3503        for (i = 0; i < xudc->soc->num_phys; i++) {
3504                char phy_name[] = "usb.-.";
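                    /* Template sized to hold "usbX-Y" names such as "usb2-0". */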
3505
3506                /* Get USB2 phy */
3507                snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
3508                xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
3509                if (IS_ERR(xudc->utmi_phy[i])) {
3510                        err = PTR_ERR(xudc->utmi_phy[i]);
3511                        dev_err_probe(xudc->dev, err,
3512                                      "failed to get usb2-%d PHY\n", i);
3513                        goto clean_up;
3514                } else if (xudc->utmi_phy[i]) {
3515                        /* Get usb-phy, if utmi phy is available */
3516                        xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
3517                                                xudc->utmi_phy[i]->dev.of_node,
3518                                                &xudc->vbus_nb);
3519                        if (IS_ERR(xudc->usbphy[i])) {
3520                                err = PTR_ERR(xudc->usbphy[i]);
3521                                dev_err_probe(xudc->dev, err,
3522                                              "failed to get usbphy-%d\n", i);
3523                                goto clean_up;
3524                        }
3525                } else if (!xudc->utmi_phy[i]) {
3526                        /* if utmi phy is not available, ignore USB3 phy get */
3527                        continue;
3528                }
3529
3530                /* Get USB3 phy */
3531                usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
3532                if (usb3 < 0)
3533                        continue;
3534
3535                snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
3536                xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
3537                if (IS_ERR(xudc->usb3_phy[i])) {
3538                        err = PTR_ERR(xudc->usb3_phy[i]);
3539                        dev_err_probe(xudc->dev, err,
3540                                      "failed to get usb3-%d PHY\n", usb3);
3541                        goto clean_up;
3542        } else if (xudc->usb3_phy[i])
3543                dev_dbg(xudc->dev, "usb3-%d PHY registered\n", usb3);
3544        }
3545
3546        return err;
3547
3548clean_up:
3549        for (i = 0; i < xudc->soc->num_phys; i++) {
3550                xudc->usb3_phy[i] = NULL;
3551                xudc->utmi_phy[i] = NULL;
3552                xudc->usbphy[i] = NULL;
3553        }
3554
3555        return err;
3556}
3557
3558static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
3559{
3560        unsigned int i;
3561
3562        for (i = 0; i < xudc->soc->num_phys; i++) {
3563                phy_exit(xudc->usb3_phy[i]);
3564                phy_exit(xudc->utmi_phy[i]);
3565        }
3566}
3567
3568static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
3569{
3570        int err;
3571        unsigned int i;
3572
3573        for (i = 0; i < xudc->soc->num_phys; i++) {
3574                err = phy_init(xudc->utmi_phy[i]);
3575                if (err < 0) {
3576                        dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err);
3577                        goto exit_phy;
3578                }
3579
3580                err = phy_init(xudc->usb3_phy[i]);
3581                if (err < 0) {
3582                        dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err);
3583                        goto exit_phy;
3584                }
3585        }
3586        return 0;
3587
3588exit_phy:
3589        tegra_xudc_phy_exit(xudc);
3590        return err;
3591}
3592
3593static const char * const tegra210_xudc_supply_names[] = {
3594        "hvdd-usb",
3595        "avddio-usb",
3596};
3597
3598static const char * const tegra210_xudc_clock_names[] = {
3599        "dev",
3600        "ss",
3601        "ss_src",
3602        "hs_src",
3603        "fs_src",
3604};
3605
3606static const char * const tegra186_xudc_clock_names[] = {
3607        "dev",
3608        "ss",
3609        "ss_src",
3610        "fs_src",
3611};
3612
3613static struct tegra_xudc_soc tegra210_xudc_soc_data = {
3614        .supply_names = tegra210_xudc_supply_names,
3615        .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
3616        .clock_names = tegra210_xudc_clock_names,
3617        .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
3618        .num_phys = 4,
3619        .u1_enable = false,
3620        .u2_enable = true,
3621        .lpm_enable = false,
3622        .invalid_seq_num = true,
3623        .pls_quirk = true,
3624        .port_reset_quirk = true,
3625        .port_speed_quirk = false,
3626        .has_ipfs = true,
3627};
3628
3629static struct tegra_xudc_soc tegra186_xudc_soc_data = {
3630        .clock_names = tegra186_xudc_clock_names,
3631        .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
3632        .num_phys = 4,
3633        .u1_enable = true,
3634        .u2_enable = true,
3635        .lpm_enable = false,
3636        .invalid_seq_num = false,
3637        .pls_quirk = false,
3638        .port_reset_quirk = false,
3639        .port_speed_quirk = false,
3640        .has_ipfs = false,
3641};
3642
3643static struct tegra_xudc_soc tegra194_xudc_soc_data = {
3644        .clock_names = tegra186_xudc_clock_names,
3645        .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
3646        .num_phys = 4,
3647        .u1_enable = true,
3648        .u2_enable = true,
3649        .lpm_enable = true,
3650        .invalid_seq_num = false,
3651        .pls_quirk = false,
3652        .port_reset_quirk = false,
3653        .port_speed_quirk = true,
3654        .has_ipfs = false,
3655};
3656
3657static const struct of_device_id tegra_xudc_of_match[] = {
3658        {
3659                .compatible = "nvidia,tegra210-xudc",
3660                .data = &tegra210_xudc_soc_data
3661        },
3662        {
3663                .compatible = "nvidia,tegra186-xudc",
3664                .data = &tegra186_xudc_soc_data
3665        },
3666        {
3667                .compatible = "nvidia,tegra194-xudc",
3668                .data = &tegra194_xudc_soc_data
3669        },
3670        { }
3671};
3672MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
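    /*
     * Sketch of a (hypothetical) device tree node that would bind to
     * this driver; the unit address below is a placeholder, while the
     * names match what probe requests:
     *
     *   usb@3550000 {
     *           compatible = "nvidia,tegra186-xudc";
     *           reg-names = "base", "fpci";
     *           clock-names = "dev", "ss", "ss_src", "fs_src";
     *           power-domain-names = "dev", "ss";
     *           phy-names = "usb2-0", "usb3-0";
     *   };
     */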
3673
3674static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
3675{
3676        if (xudc->genpd_dl_ss)
3677                device_link_del(xudc->genpd_dl_ss);
3678        if (xudc->genpd_dl_device)
3679                device_link_del(xudc->genpd_dl_device);
3680        if (!IS_ERR_OR_NULL(xudc->genpd_dev_ss))
3681                dev_pm_domain_detach(xudc->genpd_dev_ss, true);
3682        if (!IS_ERR_OR_NULL(xudc->genpd_dev_device))
3683                dev_pm_domain_detach(xudc->genpd_dev_device, true);
3684}
3685
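    /*
     * Attach the "dev" and "ss" power domains and link them to the
     * controller so that runtime PM powers both. Note that
     * dev_pm_domain_attach_by_name() may return NULL (no such domain),
     * which is treated as an error here.
     */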
3686static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
3687{
3688        struct device *dev = xudc->dev;
3689        int err;
3690
3691        xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
3692        if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) {
3693                err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA;
3694                dev_err(dev, "failed to get device power domain: %d\n", err);
3695                return err;
3696        }
3697
3698        xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
3699        if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) {
3700                err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA;
3701                dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
3702                return err;
3703        }
3704
3705        xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
3706                                                DL_FLAG_PM_RUNTIME |
3707                                                DL_FLAG_STATELESS);
3708        if (!xudc->genpd_dl_device) {
3709                dev_err(dev, "failed to add USB device link\n");
3710                return -ENODEV;
3711        }
3712
3713        xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
3714                                            DL_FLAG_PM_RUNTIME |
3715                                            DL_FLAG_STATELESS);
3716        if (!xudc->genpd_dl_ss) {
3717                dev_err(dev, "failed to add SuperSpeed device link\n");
3718                return -ENODEV;
3719        }
3720
3721        return 0;
3722}
3723
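    /*
     * Probe: map the register apertures, claim the interrupt, clocks and
     * supplies, acquire padctl and the per-port PHYs, attach the power
     * domains, set up the event ring and endpoints, and finally register
     * the gadget with the UDC core.
     */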
3724static int tegra_xudc_probe(struct platform_device *pdev)
3725{
3726        struct tegra_xudc *xudc;
3727        struct resource *res;
3728        unsigned int i;
3729        int err;
3730
3731        xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
3732        if (!xudc)
3733                return -ENOMEM;
3734
3735        xudc->dev = &pdev->dev;
3736        platform_set_drvdata(pdev, xudc);
3737
3738        xudc->soc = of_device_get_match_data(&pdev->dev);
3739        if (!xudc->soc)
3740                return -ENODEV;
3741
3742        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3743        xudc->base = devm_ioremap_resource(&pdev->dev, res);
3744        if (IS_ERR(xudc->base))
3745                return PTR_ERR(xudc->base);
3746        xudc->phys_base = res->start;
3747
3748        xudc->fpci = devm_platform_ioremap_resource_byname(pdev, "fpci");
3749        if (IS_ERR(xudc->fpci))
3750                return PTR_ERR(xudc->fpci);
3751
3752        if (xudc->soc->has_ipfs) {
3753                xudc->ipfs = devm_platform_ioremap_resource_byname(pdev, "ipfs");
3754                if (IS_ERR(xudc->ipfs))
3755                        return PTR_ERR(xudc->ipfs);
3756        }
3757
3758        xudc->irq = platform_get_irq(pdev, 0);
3759        if (xudc->irq < 0)
3760                return xudc->irq;
3761
3762        err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
3763                               dev_name(&pdev->dev), xudc);
3764        if (err < 0) {
3765                dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
3766                        err);
3767                return err;
3768        }
3769
3770        xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, sizeof(*xudc->clks),
3771                                  GFP_KERNEL);
3772        if (!xudc->clks)
3773                return -ENOMEM;
3774
3775        for (i = 0; i < xudc->soc->num_clks; i++)
3776                xudc->clks[i].id = xudc->soc->clock_names[i];
3777
3778        err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
3779        if (err) {
3780                dev_err_probe(xudc->dev, err, "failed to request clocks\n");
3781                return err;
3782        }
3783
3784        xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
3785                                      sizeof(*xudc->supplies), GFP_KERNEL);
3786        if (!xudc->supplies)
3787                return -ENOMEM;
3788
3789        for (i = 0; i < xudc->soc->num_supplies; i++)
3790                xudc->supplies[i].supply = xudc->soc->supply_names[i];
3791
3792        err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
3793                                      xudc->supplies);
3794        if (err) {
3795                dev_err_probe(xudc->dev, err, "failed to request regulators\n");
3796                return err;
3797        }
3798
3799        xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
3800        if (IS_ERR(xudc->padctl))
3801                return PTR_ERR(xudc->padctl);
3802
3803        err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
3804        if (err) {
3805                dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
3806                goto put_padctl;
3807        }
3808
3809        err = tegra_xudc_phy_get(xudc);
3810        if (err)
3811                goto disable_regulator;
3812
3813        err = tegra_xudc_powerdomain_init(xudc);
3814        if (err)
3815                goto put_powerdomains;
3816
3817        err = tegra_xudc_phy_init(xudc);
3818        if (err)
3819                goto put_powerdomains;
3820
3821        err = tegra_xudc_alloc_event_ring(xudc);
3822        if (err)
3823                goto disable_phy;
3824
3825        err = tegra_xudc_alloc_eps(xudc);
3826        if (err)
3827                goto free_event_ring;
3828
3829        spin_lock_init(&xudc->lock);
3830
3831        init_completion(&xudc->disconnect_complete);
3832
3833        INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
3834
3835        INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
3836
3837        INIT_DELAYED_WORK(&xudc->port_reset_war_work,
3838                                tegra_xudc_port_reset_war_work);
3839
3840        pm_runtime_enable(&pdev->dev);
3841
3842        xudc->gadget.ops = &tegra_xudc_gadget_ops;
3843        xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
3844        xudc->gadget.name = "tegra-xudc";
3845        xudc->gadget.max_speed = USB_SPEED_SUPER;
3846
3847        err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
3848        if (err) {
3849                dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
3850                goto free_eps;
3851        }
3852
3853        return 0;
3854
3855free_eps:
3856        pm_runtime_disable(&pdev->dev);
3857        tegra_xudc_free_eps(xudc);
3858free_event_ring:
3859        tegra_xudc_free_event_ring(xudc);
3860disable_phy:
3861        tegra_xudc_phy_exit(xudc);
3862put_powerdomains:
3863        tegra_xudc_powerdomain_remove(xudc);
3864disable_regulator:
3865        regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
3866put_padctl:
3867        tegra_xusb_padctl_put(xudc->padctl);
3868
3869        return err;
3870}
3871
3872static int tegra_xudc_remove(struct platform_device *pdev)
3873{
3874        struct tegra_xudc *xudc = platform_get_drvdata(pdev);
3875        unsigned int i;
3876
3877        pm_runtime_get_sync(xudc->dev);
3878
3879        cancel_delayed_work_sync(&xudc->plc_reset_work);
3880        cancel_work_sync(&xudc->usb_role_sw_work);
3881
3882        usb_del_gadget_udc(&xudc->gadget);
3883
3884        tegra_xudc_free_eps(xudc);
3885        tegra_xudc_free_event_ring(xudc);
3886
3887        tegra_xudc_powerdomain_remove(xudc);
3888
3889        regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
3890
3891        for (i = 0; i < xudc->soc->num_phys; i++) {
3892                phy_power_off(xudc->utmi_phy[i]);
3893                phy_power_off(xudc->usb3_phy[i]);
3894        }
3895
3896        tegra_xudc_phy_exit(xudc);
3897
3898        pm_runtime_disable(xudc->dev);
3899        pm_runtime_put(xudc->dev);
3900
3901        tegra_xusb_padctl_put(xudc->padctl);
3902
3903        return 0;
3904}
3905
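    /*
     * ELPG entry: save CTRL and PORTPM (presumably lost across
     * powergating), stop the controller, then gate clocks and supplies.
     * tegra_xudc_unpowergate() reverses this and reprograms everything
     * the hardware forgets.
     */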
3906static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
3907{
3908        unsigned long flags;
3909
3910        dev_dbg(xudc->dev, "entering ELPG\n");
3911
3912        spin_lock_irqsave(&xudc->lock, flags);
3913
3914        xudc->powergated = true;
3915        xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
3916        xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
3917        xudc_writel(xudc, 0, CTRL);
3918
3919        spin_unlock_irqrestore(&xudc->lock, flags);
3920
3921        clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);
3922
3923        regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
3924
3925        dev_dbg(xudc->dev, "entering ELPG done\n");
3926        return 0;
3927}
3928
3929static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
3930{
3931        unsigned long flags;
3932        int err;
3933
3934        dev_dbg(xudc->dev, "exiting ELPG\n");
3935
3936        err = regulator_bulk_enable(xudc->soc->num_supplies,
3937                        xudc->supplies);
3938        if (err < 0)
3939                return err;
3940
3941        err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
3942        if (err < 0) {
                    /* Don't leave the supplies enabled if clock enable failed. */
                    regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
3943                return err;
            }
3944
3945        tegra_xudc_fpci_ipfs_init(xudc);
3946
3947        tegra_xudc_device_params_init(xudc);
3948
3949        tegra_xudc_init_event_ring(xudc);
3950
3951        tegra_xudc_init_eps(xudc);
3952
3953        xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
3954        xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);
3955
3956        spin_lock_irqsave(&xudc->lock, flags);
3957        xudc->powergated = false;
3958        spin_unlock_irqrestore(&xudc->lock, flags);
3959
3960        dev_dbg(xudc->dev, "exiting ELPG done\n");
3961        return 0;
3962}
3963
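    /*
     * System sleep: flush pending role-switch work and, unless runtime
     * PM has already powergated the controller, force a disconnect
     * before entering ELPG.
     */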
3964static int __maybe_unused tegra_xudc_suspend(struct device *dev)
3965{
3966        struct tegra_xudc *xudc = dev_get_drvdata(dev);
3967        unsigned long flags;
3968
3969        spin_lock_irqsave(&xudc->lock, flags);
3970        xudc->suspended = true;
3971        spin_unlock_irqrestore(&xudc->lock, flags);
3972
3973        flush_work(&xudc->usb_role_sw_work);
3974
3975        if (!pm_runtime_status_suspended(dev)) {
3976                /* Forcibly disconnect before powergating. */
3977                tegra_xudc_device_mode_off(xudc);
3978                tegra_xudc_powergate(xudc);
3979        }
3980
3981        pm_runtime_disable(dev);
3982
3983        return 0;
3984}
3985
3986static int __maybe_unused tegra_xudc_resume(struct device *dev)
3987{
3988        struct tegra_xudc *xudc = dev_get_drvdata(dev);
3989        unsigned long flags;
3990        int err;
3991
3992        err = tegra_xudc_unpowergate(xudc);
3993        if (err < 0)
3994                return err;
3995
3996        spin_lock_irqsave(&xudc->lock, flags);
3997        xudc->suspended = false;
3998        spin_unlock_irqrestore(&xudc->lock, flags);
3999
4000        schedule_work(&xudc->usb_role_sw_work);
4001
4002        pm_runtime_enable(dev);
4003
4004        return 0;
4005}
4006
4007static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
4008{
4009        struct tegra_xudc *xudc = dev_get_drvdata(dev);
4010
4011        return tegra_xudc_powergate(xudc);
4012}
4013
4014static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
4015{
4016        struct tegra_xudc *xudc = dev_get_drvdata(dev);
4017
4018        return tegra_xudc_unpowergate(xudc);
4019}
4020
4021static const struct dev_pm_ops tegra_xudc_pm_ops = {
4022        SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
4023        SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
4024                           tegra_xudc_runtime_resume, NULL)
4025};
4026
4027static struct platform_driver tegra_xudc_driver = {
4028        .probe = tegra_xudc_probe,
4029        .remove = tegra_xudc_remove,
4030        .driver = {
4031                .name = "tegra-xudc",
4032                .pm = &tegra_xudc_pm_ops,
4033                .of_match_table = tegra_xudc_of_match,
4034        },
4035};
4036module_platform_driver(tegra_xudc_driver);
4037
4038MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
4039MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
4040MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
4041MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
4042MODULE_LICENSE("GPL v2");
4043