linux/drivers/usb/mtu3/mtu3_qmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload from software
 * the effort of servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors
 * (BD), software links data buffers and triggers the QMU to transmit data
 * to / receive data from the host in one go.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN        16

#define GPD_FLAGS_HWO   BIT(0)
#define GPD_FLAGS_BDP   BIT(1)
#define GPD_FLAGS_BPS   BIT(2)
#define GPD_FLAGS_ZLP   BIT(6)
#define GPD_FLAGS_IOC   BIT(7)
#define GET_GPD_HWO(gpd)        (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)

#define GPD_RX_BUF_LEN_OG(x)    (((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)    (((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)  \
({                              \
        typeof(x) x_ = (x);     \
        ((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)      ((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)      ((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)    \
({                              \
        typeof(x) x_ = (x);     \
        ((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

#define GPD_EXT_FLAG_ZLP        BIT(29)
#define GPD_EXT_NGP_OG(x)       (((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)       (((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)       (((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)       (((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)     \
({                              \
        typeof(x) x_ = (x);     \
        ((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)     \
({                              \
        typeof(x) x_ = (x);     \
        ((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)        \
        ((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
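
/*
 * read_txq_cur_addr()/read_rxq_cur_addr() return the DMA address of the
 * GPD the queue is currently processing: the low 32 bits come from the
 * TQCPR/RQCPR registers, the high bits from the matching HIAR register.
 */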
static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
        u32 txcpr;
        u32 txhiar;

        txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
        txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

        return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
        u32 rxcpr;
        u32 rxhiar;

        rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
        rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

        return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
        u32 tqhiar;

        mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
                    cpu_to_le32(lower_32_bits(dma)));
        tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
        tqhiar &= ~QMU_START_ADDR_HI_MSK;
        tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
        mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
        u32 rqhiar;

        mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
                    cpu_to_le32(lower_32_bits(dma)));
        rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
        rqhiar &= ~QMU_START_ADDR_HI_MSK;
        rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
        mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}
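
/*
 * The GPD ring is one contiguous dma_pool allocation, so translating
 * between a GPD's DMA address and its virtual address is simple offset
 * arithmetic; an out-of-range address yields NULL (or 0 for the DMA case).
 */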
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
                dma_addr_t dma_addr)
{
        dma_addr_t dma_base = ring->dma;
        struct qmu_gpd *gpd_head = ring->start;
        u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

        if (offset >= MAX_GPD_NUM)
                return NULL;

        return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
                struct qmu_gpd *gpd)
{
        dma_addr_t dma_base = ring->dma;
        struct qmu_gpd *gpd_head = ring->start;
        u32 offset;

        offset = gpd - gpd_head;
        if (offset >= MAX_GPD_NUM)
                return 0;

        return dma_base + (offset * sizeof(*gpd));
}
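
/*
 * Reset the ring cursors: enqueue and dequeue both point at the first GPD,
 * and 'end' points at the last of the MAX_GPD_NUM descriptors.
 */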
static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
        ring->start = gpd;
        ring->enqueue = gpd;
        ring->dequeue = gpd;
        ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->start;

        if (gpd) {
                gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
                gpd_ring_init(ring, gpd);
        }
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
        struct qmu_gpd *gpd;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;

        /* software owns all GPDs by default */
        gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
        if (gpd == NULL)
                return -ENOMEM;

        gpd_ring_init(ring, gpd);

        return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;

        dma_pool_free(mep->mtu->qmu_gpd_pool,
                        ring->start, ring->dma);
        memset(ring, 0, sizeof(*ring));
}
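
/*
 * Kick an idle queue so it resumes fetching GPDs; the resume bit is
 * written a second time if the queue does not report itself active
 * after the first write.
 */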
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        int epnum = mep->epnum;
        u32 offset;

        offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

        mtu3_writel(mbase, offset, QMU_Q_RESUME);
        if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
                mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
        if (ring->enqueue < ring->end)
                ring->enqueue++;
        else
                ring->enqueue = ring->start;

        return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
        if (ring->dequeue < ring->end)
                ring->dequeue++;
        else
                ring->dequeue = ring->start;

        return ring->dequeue;
}

/* check if a ring has run out of free GPDs for new requests */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
        struct qmu_gpd *enq = ring->enqueue;
        struct qmu_gpd *next;

        if (ring->enqueue < ring->end)
                next = enq + 1;
        else
                next = ring->start;

        /* one gpd is reserved to simplify gpd preparation */
        return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
        return gpd_ring_empty(&mep->gpd_ring);
}
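
/*
 * Fill the GPD at the enqueue position for a TX (IN) request: program the
 * buffer address and data length, link it to the next GPD (which stays
 * SW-owned and acts as the ring terminator), request a ZLP if asked for
 * (the flag lives in a different dword on gen2cp controllers), and finally
 * set IOC | HWO to hand the descriptor over to the hardware.
 */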
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        struct qmu_gpd *enq;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->enqueue;
        struct usb_request *req = &mreq->request;
        struct mtu3 *mtu = mep->mtu;
        dma_addr_t enq_dma;
        u32 ext_addr;

        gpd->dw0_info = 0;      /* SW owns it */
        gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
        ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
        gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

        /* get the next GPD */
        enq = advance_enq_gpd(ring);
        enq_dma = gpd_virt_to_dma(ring, enq);
        dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
                mep->epnum, gpd, enq, &enq_dma);

        enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
        gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
        ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
        gpd->dw0_info = cpu_to_le32(ext_addr);

        if (req->zero) {
                if (mtu->gen2cp)
                        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
                else
                        gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
        }

        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

        mreq->gpd = gpd;
        trace_mtu3_prepare_gpd(mep, gpd);

        return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        struct qmu_gpd *enq;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->enqueue;
        struct usb_request *req = &mreq->request;
        struct mtu3 *mtu = mep->mtu;
        dma_addr_t enq_dma;
        u32 ext_addr;

        gpd->dw0_info = 0;      /* SW owns it */
        gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
        ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
        gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

        /* get the next GPD */
        enq = advance_enq_gpd(ring);
        enq_dma = gpd_virt_to_dma(ring, enq);
        dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
                mep->epnum, gpd, enq, &enq_dma);

        enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
        gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
        ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
        gpd->dw3_info = cpu_to_le32(ext_addr);
        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

        mreq->gpd = gpd;
        trace_mtu3_prepare_gpd(mep, gpd);

        return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        if (mep->is_in)
                mtu3_prepare_tx_gpd(mep, mreq);
        else
                mtu3_prepare_rx_gpd(mep, mreq);
}
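
/*
 * A rough sketch of how the gadget layer drives these helpers when queuing
 * a request (see mtu3_gadget_queue() in mtu3_gadget.c):
 *
 *      if (mtu3_prepare_transfer(mep))   -> ring full, no free GPD, -EAGAIN
 *      map the request buffer for DMA
 *      mtu3_insert_gpd(mep, mreq)        -> fill a GPD and hand it to HW
 *      mtu3_qmu_resume(mep)              -> kick the queue
 */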

int mtu3_qmu_start(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        u8 epnum = mep->epnum;

        if (mep->is_in) {
                /* set QMU start address */
                write_txq_start_addr(mbase, epnum, ring->dma);
                mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
                /* send zero length packet according to ZLP flag in GPD */
                mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
                mtu3_writel(mbase, U3D_TQERRIESR0,
                                QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

                if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
                        dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
                        return 0;
                }
                mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

        } else {
                write_rxq_start_addr(mbase, epnum, ring->dma);
                mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
                /* don't expect ZLP */
                mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
                /* move to the next GPD when a ZLP is received */
                mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
                mtu3_writel(mbase, U3D_RQERRIESR0,
                                QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
                mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

                if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
                        dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
                        return 0;
                }
                mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
        }

        return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        int epnum = mep->epnum;
        u32 value = 0;
        u32 qcsr;
        int ret;

        qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

        if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
                dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
                return;
        }
        mtu3_writel(mbase, qcsr, QMU_Q_STOP);

        ret = readl_poll_timeout_atomic(mbase + qcsr, value,
                        !(value & QMU_Q_ACTIVE), 1, 1000);
        if (ret) {
                dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
                return;
        }

        dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
        dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
                ((mep->is_in) ? "TX" : "RX"));

        /* stop QMU */
        mtu3_qmu_stop(mep);
        reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero-length packet directly (a hardware limitation
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger a
 * length error interrupt and send the ZLP by BMU from the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->in_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd_current = NULL;
        struct mtu3_request *mreq;
        dma_addr_t cur_gpd_dma;
        u32 txcsr = 0;
        int ret;

        mreq = next_request(mep);
        if (mreq && mreq->request.length != 0)
                return;

        cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
        gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

        if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
                dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
                return;
        }

        dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
        trace_mtu3_zlp_exp_gpd(mep, gpd_current);

        mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

        ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
                        txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
        if (ret) {
                dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
                return;
        }
        mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

        /* bypass the current GPD */
        gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

        /* enable DMAREQEN, switch back to QMU mode */
        mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
        mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet (meanwhile the second request is transferred,
 * and the tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid the case above, qmu_done_tx is called directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->in_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd = ring->dequeue;
        struct qmu_gpd *gpd_current = NULL;
        struct usb_request *request = NULL;
        struct mtu3_request *mreq;
        dma_addr_t cur_gpd_dma;

        /*
         * translate the physical address of the current GPD (read from the
         * QMU register) into a virtual address
         */
        cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
        gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

        dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                __func__, epnum, gpd, gpd_current, ring->enqueue);

        while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
                mreq = next_request(mep);

                if (mreq == NULL || mreq->gpd != gpd) {
                        dev_err(mtu->dev, "no correct TX req is found\n");
                        break;
                }

                request = &mreq->request;
                request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
                trace_mtu3_complete_gpd(mep, gpd);
                mtu3_req_complete(mep, request, 0);

                gpd = advance_deq_gpd(ring);
        }

        dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
                __func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->out_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd = ring->dequeue;
        struct qmu_gpd *gpd_current = NULL;
        struct usb_request *req = NULL;
        struct mtu3_request *mreq;
        dma_addr_t cur_gpd_dma;

        cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
        gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

        dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                __func__, epnum, gpd, gpd_current, ring->enqueue);

        while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
                mreq = next_request(mep);

                if (mreq == NULL || mreq->gpd != gpd) {
                        dev_err(mtu->dev, "no correct RX req is found\n");
                        break;
                }
                req = &mreq->request;

                req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
                trace_mtu3_complete_gpd(mep, gpd);
                mtu3_req_complete(mep, req, 0);

                gpd = advance_deq_gpd(ring);
        }

        dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
                __func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
        int i;

        for (i = 1; i < mtu->num_eps; i++) {
                if (done_status & QMU_RX_DONE_INT(i))
                        qmu_done_rx(mtu, i);
                if (done_status & QMU_TX_DONE_INT(i))
                        qmu_done_tx(mtu, i);
        }
}
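
/*
 * Handle QMU error interrupts: report per-endpoint RX/TX checksum and
 * length errors, acknowledge the RX "received ZLP" status, and route TX
 * length errors to qmu_tx_zlp_error_handler() (the expected path of the
 * ZLP workaround described above).
 */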
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
        void __iomem *mbase = mtu->mac_base;
        u32 errval;
        int i;

        if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
                errval = mtu3_readl(mbase, U3D_RQERRIR0);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_RX_CS_ERR(i))
                                dev_err(mtu->dev, "Rx %d CS error!\n", i);

                        if (errval & QMU_RX_LEN_ERR(i))
                                dev_err(mtu->dev, "RX %d Length error\n", i);
                }
                mtu3_writel(mbase, U3D_RQERRIR0, errval);
        }

        if (qmu_status & RXQ_ZLPERR_INT) {
                errval = mtu3_readl(mbase, U3D_RQERRIR1);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_RX_ZLP_ERR(i))
                                dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
                }
                mtu3_writel(mbase, U3D_RQERRIR1, errval);
        }

        if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
                errval = mtu3_readl(mbase, U3D_TQERRIR0);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_TX_CS_ERR(i))
                                dev_err(mtu->dev, "Tx %d checksum error!\n", i);

                        if (errval & QMU_TX_LEN_ERR(i))
                                qmu_tx_zlp_error_handler(mtu, i);
                }
                mtu3_writel(mbase, U3D_TQERRIR0, errval);
        }
}
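
/*
 * Top-level QMU interrupt handler: read the exception and done status,
 * mask them with the enabled interrupt bits, acknowledge the done status
 * (write-1-clear), then handle completions before exceptions.
 */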
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
        void __iomem *mbase = mtu->mac_base;
        u32 qmu_status;
        u32 qmu_done_status;

        /* U3D_QISAR1 is read update */
        qmu_status = mtu3_readl(mbase, U3D_QISAR1);
        qmu_status &= mtu3_readl(mbase, U3D_QIER1);

        qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
        qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
        mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
        dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
                (qmu_done_status & 0xFFFF), qmu_done_status >> 16,
                qmu_status);
        trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

        if (qmu_done_status)
                qmu_done_isr(mtu, qmu_done_status);

        if (qmu_status)
                qmu_exception_isr(mtu, qmu_status);

        return IRQ_HANDLED;
}

int mtu3_qmu_init(struct mtu3 *mtu)
{
        compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

        mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
                        QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

        if (!mtu->qmu_gpd_pool)
                return -ENOMEM;

        return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
        dma_pool_destroy(mtu->qmu_gpd_pool);
}