linux/drivers/usb/mtu3/mtu3_qmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload the SW effort
 * of servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
 * SW links the data buffers once and triggers the QMU to send data to /
 * receive data from the host in one go.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */
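
/*
 * Rough sketch (an illustration, not the authoritative call sequence) of
 * how the gadget layer drives a transfer through this file:
 *
 *	mtu3_prepare_transfer(mep);	returns nonzero if the GPD ring is full
 *	mtu3_insert_gpd(mep, mreq);	fill a GPD and hand it to HW (set HWO)
 *	mtu3_qmu_resume(mep);		kick the queue
 *
 * Completions are reported by mtu3_qmu_isr(), which calls qmu_done_tx()/
 * qmu_done_rx() to give finished requests back to the gadget layer.
 */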

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"

#define QMU_CHECKSUM_LEN        16

#define GPD_FLAGS_HWO   BIT(0)
#define GPD_FLAGS_BDP   BIT(1)
#define GPD_FLAGS_BPS   BIT(2)
#define GPD_FLAGS_IOC   BIT(7)

#define GPD_EXT_FLAG_ZLP        BIT(5)
#define GPD_EXT_NGP(x)          (((x) & 0xf) << 4)
#define GPD_EXT_BUF(x)          (((x) & 0xf) << 0)

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)        \
        ((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
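
/*
 * 64-bit GPD addresses are split across two registers: the low 32 bits are
 * kept in TQCPR/TQSAR (RQCPR/RQSAR for RX) and the high bits in TQHIAR
 * (RQHIAR), so a current-GPD pointer is rebuilt as
 * HILO_DMA(QMU_CUR_GPD_ADDR_HI(hiar), cpr) by the helpers below.
 */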

static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
        u32 txcpr;
        u32 txhiar;

        txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
        txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

        return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
        u32 rxcpr;
        u32 rxhiar;

        rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
        rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

        return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
        u32 tqhiar;

        mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
                    cpu_to_le32(lower_32_bits(dma)));
        tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
        tqhiar &= ~QMU_START_ADDR_HI_MSK;
        tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
        mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
        u32 rqhiar;

        mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
                    cpu_to_le32(lower_32_bits(dma)));
        rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
        rqhiar &= ~QMU_START_ADDR_HI_MSK;
        rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
        mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}

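/*
 * Each endpoint's GPDs are allocated as one contiguous block from the
 * GPD DMA pool, so converting between a GPD's bus address and its virtual
 * address is plain index arithmetic relative to the start of the ring.
 */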
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
                dma_addr_t dma_addr)
{
        dma_addr_t dma_base = ring->dma;
        struct qmu_gpd *gpd_head = ring->start;
        u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

        if (offset >= MAX_GPD_NUM)
                return NULL;

        return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
                struct qmu_gpd *gpd)
{
        dma_addr_t dma_base = ring->dma;
        struct qmu_gpd *gpd_head = ring->start;
        u32 offset;

        offset = gpd - gpd_head;
        if (offset >= MAX_GPD_NUM)
                return 0;

        return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
        ring->start = gpd;
        ring->enqueue = gpd;
        ring->dequeue = gpd;
        ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->start;

        if (gpd) {
                gpd->flag &= ~GPD_FLAGS_HWO;
                gpd_ring_init(ring, gpd);
        }
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
        struct qmu_gpd *gpd;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;

        /* software owns all GPDs by default: the pool memory is zeroed, so HWO is clear */
        gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
        if (gpd == NULL)
                return -ENOMEM;

        gpd_ring_init(ring, gpd);

        return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;

        dma_pool_free(mep->mtu->qmu_gpd_pool,
                        ring->start, ring->dma);
        memset(ring, 0, sizeof(*ring));
}

/*
 * calculate the checksum of a GPD or BD: the "noinline" and the mb() guard
 * against the sum being computed over not-yet-updated descriptor contents.
 */
static noinline u8 qmu_calc_checksum(u8 *data)
{
        u8 chksum = 0;
        int i;

        data[1] = 0x0;  /* clear the checksum byte before summing */

        mb();   /* ensure the gpd/bd is really up-to-date */
        for (i = 0; i < QMU_CHECKSUM_LEN; i++)
                chksum += data[i];

        /* assume HWO (@flag bit0) will be set to 1 after this runs */
        chksum += 1;

        return 0xFF - chksum;
}
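
/*
 * A small worked example of the checksum rule above (illustration only):
 * with the checksum byte zeroed and HWO still clear, if the 16 descriptor
 * bytes sum to 0x37, qmu_calc_checksum() stores 0xFF - (0x37 + 1) = 0xC7.
 * Once HWO is set, the 16 bytes (including the stored checksum) add up to
 * 0xFF again, which is presumably what the QMU verifies when checksum
 * checking (QMU_TX_CS_EN / QMU_RX_CS_EN) is enabled.
 */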

void mtu3_qmu_resume(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        int epnum = mep->epnum;
        u32 offset;

        offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

        mtu3_writel(mbase, offset, QMU_Q_RESUME);
        if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
                mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
        if (ring->enqueue < ring->end)
                ring->enqueue++;
        else
                ring->enqueue = ring->start;

        return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
        if (ring->dequeue < ring->end)
                ring->dequeue++;
        else
                ring->dequeue = ring->start;

        return ring->dequeue;
}

/* check if the ring has no empty GPD left, i.e. the ring is full */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
        struct qmu_gpd *enq = ring->enqueue;
        struct qmu_gpd *next;

        if (ring->enqueue < ring->end)
                next = enq + 1;
        else
                next = ring->start;

        /* one gpd is reserved to simplify gpd preparation */
        return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
        return gpd_ring_empty(&mep->gpd_ring);
}

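/*
 * Fill the GPD at the enqueue position for a TX (IN) request: program the
 * buffer address and length, advance the enqueue pointer so the next GPD
 * (with HWO cleared) acts as the ring's stopper, link it via next_gpd,
 * request a ZLP if the gadget asked for one, then compute the checksum and
 * set HWO last so the hardware only sees a fully prepared descriptor.
 * mtu3_prepare_rx_gpd() below does the same for RX (OUT) requests, minus
 * the ZLP handling.
 */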
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        struct qmu_gpd *enq;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->enqueue;
        struct usb_request *req = &mreq->request;
        dma_addr_t enq_dma;
        u16 ext_addr;

        /* set all fields to zero as default value */
        memset(gpd, 0, sizeof(*gpd));

        gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
        ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
        gpd->buf_len = cpu_to_le16(req->length);
        gpd->flag |= GPD_FLAGS_IOC;

        /* get the next GPD */
        enq = advance_enq_gpd(ring);
        enq_dma = gpd_virt_to_dma(ring, enq);
        dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
                mep->epnum, gpd, enq, &enq_dma);

        enq->flag &= ~GPD_FLAGS_HWO;
        gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
        ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
        gpd->tx_ext_addr = cpu_to_le16(ext_addr);

        if (req->zero)
                gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

        gpd->chksum = qmu_calc_checksum((u8 *)gpd);
        gpd->flag |= GPD_FLAGS_HWO;

        mreq->gpd = gpd;

        return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        struct qmu_gpd *enq;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        struct qmu_gpd *gpd = ring->enqueue;
        struct usb_request *req = &mreq->request;
        dma_addr_t enq_dma;
        u16 ext_addr;

        /* set all fields to zero as default value */
        memset(gpd, 0, sizeof(*gpd));

        gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
        ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
        gpd->data_buf_len = cpu_to_le16(req->length);
        gpd->flag |= GPD_FLAGS_IOC;

        /* get the next GPD */
        enq = advance_enq_gpd(ring);
        enq_dma = gpd_virt_to_dma(ring, enq);
        dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
                mep->epnum, gpd, enq, &enq_dma);

        enq->flag &= ~GPD_FLAGS_HWO;
        gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
        ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
        gpd->rx_ext_addr = cpu_to_le16(ext_addr);
        gpd->chksum = qmu_calc_checksum((u8 *)gpd);
        gpd->flag |= GPD_FLAGS_HWO;

        mreq->gpd = gpd;

        return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
        if (mep->is_in)
                mtu3_prepare_tx_gpd(mep, mreq);
        else
                mtu3_prepare_rx_gpd(mep, mreq);
}

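/*
 * Start the QMU for an endpoint: program the ring's start address, enable
 * the DMA request and checksum check for the queue, unmask the relevant
 * error interrupts, and finally write QMU_Q_START unless the queue is
 * already active.
 */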
int mtu3_qmu_start(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        u8 epnum = mep->epnum;

        if (mep->is_in) {
                /* set QMU start address */
                write_txq_start_addr(mbase, epnum, ring->dma);
                mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
                mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
                /* send zero length packet according to ZLP flag in GPD */
                mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
                mtu3_writel(mbase, U3D_TQERRIESR0,
                                QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

                if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
                        dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
                        return 0;
                }
                mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

        } else {
                write_rxq_start_addr(mbase, epnum, ring->dma);
                mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
                mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
                /* don't expect ZLP */
                mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
                /* move to next GPD when receive ZLP */
                mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
                mtu3_writel(mbase, U3D_RQERRIESR0,
                                QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
                mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

                if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
                        dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
                        return 0;
                }
                mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
        }

        return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
        struct mtu3 *mtu = mep->mtu;
        void __iomem *mbase = mtu->mac_base;
        int epnum = mep->epnum;
        u32 value = 0;
        u32 qcsr;
        int ret;

        qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

        if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
                dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
                return;
        }
        mtu3_writel(mbase, qcsr, QMU_Q_STOP);

        ret = readl_poll_timeout_atomic(mbase + qcsr, value,
                        !(value & QMU_Q_ACTIVE), 1, 1000);
        if (ret) {
                dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
                return;
        }

        dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
        dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
                ((mep->is_in) ? "TX" : "RX"));

        /* stop QMU */
        mtu3_qmu_stop(mep);
        reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero-length packet directly (a hardware limitation
 * on old SoCs), so when a ZLP has to be sent, we intentionally trigger a
 * length-error interrupt and send the ZLP via the BMU from within the ISR.
 */
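/*
 * In other words (a summary of the code below): take the endpoint out of
 * QMU/DMA mode by clearing TX_DMAREQEN, wait until the TX FIFO is no longer
 * full, set TX_TXPKTRDY so the BMU sends the zero-length packet, mark the
 * current GPD with GPD_FLAGS_BPS so the QMU skips it, recompute its checksum
 * and hand it back with HWO, then re-enable TX_DMAREQEN and resume the queue.
 */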
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->in_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd_current = NULL;
        struct usb_request *req = NULL;
        struct mtu3_request *mreq;
        dma_addr_t cur_gpd_dma;
        u32 txcsr = 0;
        int ret;

        mreq = next_request(mep);
        if (mreq && mreq->request.length == 0)
                req = &mreq->request;
        else
                return;

        cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
        gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

        if (le16_to_cpu(gpd_current->buf_len) != 0) {
                dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
                return;
        }

        dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);

        mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

        ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
                        txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
        if (ret) {
                dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
                return;
        }
        mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

        /* bypass the current GPD */
        gpd_current->flag |= GPD_FLAGS_BPS;
        gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
        gpd_current->flag |= GPD_FLAGS_HWO;

        /* enable DMAREQEN, switch back to QMU mode */
        mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
        mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet (meanwhile, the second request has also been
 * transferred, so the tasklet completes both of them) --> qmu_interrupt for
 * the second one.
 * To avoid this case, qmu_done_tx() is called directly from the ISR instead.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->in_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd = ring->dequeue;
        struct qmu_gpd *gpd_current = NULL;
        struct usb_request *request = NULL;
        struct mtu3_request *mreq;
        dma_addr_t cur_gpd_dma;

        /*
         * translate the current GPD's physical address (read from the QMU
         * register) into its virtual address
         */
        cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
        gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

        dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                __func__, epnum, gpd, gpd_current, ring->enqueue);

        while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

                mreq = next_request(mep);

                if (mreq == NULL || mreq->gpd != gpd) {
                        dev_err(mtu->dev, "no correct TX req is found\n");
                        break;
                }

                request = &mreq->request;
                request->actual = le16_to_cpu(gpd->buf_len);
                mtu3_req_complete(mep, request, 0);

                gpd = advance_deq_gpd(ring);
        }

        dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
                __func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
        struct mtu3_ep *mep = mtu->out_eps + epnum;
        struct mtu3_gpd_ring *ring = &mep->gpd_ring;
        void __iomem *mbase = mtu->mac_base;
        struct qmu_gpd *gpd = ring->dequeue;
        struct qmu_gpd *gpd_current = NULL;
        struct usb_request *req = NULL;
        struct mtu3_request *mreq;
        dma_addr_t cur_gpd_dma;

        cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
        gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

        dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                __func__, epnum, gpd, gpd_current, ring->enqueue);

        while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

                mreq = next_request(mep);

                if (mreq == NULL || mreq->gpd != gpd) {
                        dev_err(mtu->dev, "no correct RX req is found\n");
                        break;
                }
                req = &mreq->request;

                req->actual = le16_to_cpu(gpd->buf_len);
                mtu3_req_complete(mep, req, 0);

                gpd = advance_deq_gpd(ring);
        }

        dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
                __func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
        int i;

        for (i = 1; i < mtu->num_eps; i++) {
                if (done_status & QMU_RX_DONE_INT(i))
                        qmu_done_rx(mtu, i);
                if (done_status & QMU_TX_DONE_INT(i))
                        qmu_done_tx(mtu, i);
        }
}

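/*
 * Decode the per-endpoint QMU error status: checksum and length errors are
 * reported via U3D_RQERRIR0/U3D_TQERRIR0, RX ZLP events via U3D_RQERRIR1.
 * A TX length error is the deliberately triggered condition used to send a
 * ZLP, so it is routed to qmu_tx_zlp_error_handler() rather than treated as
 * a failure.
 */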
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
        void __iomem *mbase = mtu->mac_base;
        u32 errval;
        int i;

        if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
                errval = mtu3_readl(mbase, U3D_RQERRIR0);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_RX_CS_ERR(i))
                                dev_err(mtu->dev, "Rx %d CS error!\n", i);

                        if (errval & QMU_RX_LEN_ERR(i))
                                dev_err(mtu->dev, "RX %d Length error\n", i);
                }
                mtu3_writel(mbase, U3D_RQERRIR0, errval);
        }

        if (qmu_status & RXQ_ZLPERR_INT) {
                errval = mtu3_readl(mbase, U3D_RQERRIR1);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_RX_ZLP_ERR(i))
                                dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
                }
                mtu3_writel(mbase, U3D_RQERRIR1, errval);
        }

        if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
                errval = mtu3_readl(mbase, U3D_TQERRIR0);
                for (i = 1; i < mtu->num_eps; i++) {
                        if (errval & QMU_TX_CS_ERR(i))
                                dev_err(mtu->dev, "Tx %d checksum error!\n", i);

                        if (errval & QMU_TX_LEN_ERR(i))
                                qmu_tx_zlp_error_handler(mtu, i);
                }
                mtu3_writel(mbase, U3D_TQERRIR0, errval);
        }
}

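/*
 * Top-level QMU interrupt handler: read the "done" status (U3D_QISAR0) and
 * the exception status (U3D_QISAR1), mask each with its interrupt-enable
 * register, acknowledge the done bits (write-1-to-clear), and then dispatch
 * to qmu_done_isr() and qmu_exception_isr().
 */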
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
        void __iomem *mbase = mtu->mac_base;
        u32 qmu_status;
        u32 qmu_done_status;

        /* U3D_QISAR1 is read update */
        qmu_status = mtu3_readl(mbase, U3D_QISAR1);
        qmu_status &= mtu3_readl(mbase, U3D_QIER1);

        qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
        qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
        mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
        dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
                (qmu_done_status & 0xFFFF), qmu_done_status >> 16,
                qmu_status);

        if (qmu_done_status)
                qmu_done_isr(mtu, qmu_done_status);

        if (qmu_status)
                qmu_exception_isr(mtu, qmu_status);

        return IRQ_HANDLED;
}

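/*
 * Create the per-device DMA pool that backs every endpoint's GPD ring: each
 * allocation from the pool is one ring of QMU_GPD_RING_SIZE bytes, aligned
 * to the 16-byte GPD size (hence the compile-time check on QMU_GPD_SIZE).
 */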
int mtu3_qmu_init(struct mtu3 *mtu)
{
        compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

        mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
                        QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

        if (!mtu->qmu_gpd_pool)
                return -ENOMEM;

        return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
        dma_pool_destroy(mtu->qmu_gpd_pool);
}