linux/drivers/net/wireless/mediatek/mt76/dma.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

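/* Allocate a txwi (TX descriptor/header) buffer and map it for DMA to the device. */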
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

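/* Take a txwi entry from the per-device cache, or return NULL if it is empty. */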
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock(&dev->lock);

        return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        local_bh_disable();
        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
        local_bh_enable();
}

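/*
 * Reprogram the ring base and size registers and resynchronize the software
 * head/tail indices with the hardware DMA index.
 */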
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
        writel(q->desc_dma, &q->regs->desc_base);
        writel(q->ndesc, &q->regs->ring_size);
        q->head = readl(&q->regs->dma_idx);
        q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
        int i;

        if (!q)
                return;

        /* clear descriptors */
        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        writel(0, &q->regs->cpu_idx);
        writel(0, &q->regs->dma_idx);
        mt76_dma_sync_idx(dev, q);
}

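/*
 * Allocate the descriptor ring (coherent DMA memory) and the per-descriptor
 * entry array for a queue, then reset it.
 */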
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize,
                     u32 ring_base)
{
        int size;

        spin_lock_init(&q->lock);
        spin_lock_init(&q->cleanup_lock);

        q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
        q->ndesc = n_desc;
        q->buf_size = bufsize;
        q->hw_idx = idx;

        size = q->ndesc * sizeof(struct mt76_desc);
        q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        size = q->ndesc * sizeof(*q->entry);
        q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        mt76_dma_queue_reset(dev, q);

        return 0;
}

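/*
 * Write up to two buffers per hardware descriptor into the ring and record
 * their addresses/lengths in the shadow entries for later unmapping.
 */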
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
                 struct mt76_queue_buf *buf, int nbufs, u32 info,
                 struct sk_buff *skb, void *txwi)
{
        struct mt76_queue_entry *entry;
        struct mt76_desc *desc;
        u32 ctrl;
        int i, idx = -1;

        if (txwi) {
                q->entry[q->head].txwi = DMA_DUMMY_DATA;
                q->entry[q->head].skip_buf0 = true;
        }

        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;

                idx = q->head;
                q->head = (q->head + 1) % q->ndesc;

                desc = &q->desc[idx];
                entry = &q->entry[idx];

                if (buf[0].skip_unmap)
                        entry->skip_buf0 = true;
                entry->skip_buf1 = i == nbufs - 1;

                entry->dma_addr[0] = buf[0].addr;
                entry->dma_len[0] = buf[0].len;

                ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
                if (i < nbufs - 1) {
                        entry->dma_addr[1] = buf[1].addr;
                        entry->dma_len[1] = buf[1].len;
                        buf1 = buf[1].addr;
                        ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
                        if (buf[1].skip_unmap)
                                entry->skip_buf1 = true;
                }

                if (i == nbufs - 1)
                        ctrl |= MT_DMA_CTL_LAST_SEC0;
                else if (i == nbufs - 2)
                        ctrl |= MT_DMA_CTL_LAST_SEC1;

                WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
                WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
                WRITE_ONCE(desc->info, cpu_to_le32(info));
                WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

                q->queued++;
        }

        q->entry[idx].txwi = txwi;
        q->entry[idx].skb = skb;
        q->entry[idx].wcid = 0xffff;

        return idx;
}

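/*
 * Unmap the buffers of a completed TX descriptor and hand back a copy of the
 * queue entry before clearing it.
 */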
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                        struct mt76_queue_entry *prev_e)
{
        struct mt76_queue_entry *e = &q->entry[idx];

        if (!e->skip_buf0)
                dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
                                 DMA_TO_DEVICE);

        if (!e->skip_buf1)
                dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
                                 DMA_TO_DEVICE);

        if (e->txwi == DMA_DUMMY_DATA)
                e->txwi = NULL;

        if (e->skb == DMA_DUMMY_DATA)
                e->skb = NULL;

        *prev_e = *e;
        memset(e, 0, sizeof(*e));
}

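/* Ensure descriptor writes are visible before updating the CPU index register. */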
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        wmb();
        writel(q->head, &q->regs->cpu_idx);
}

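/*
 * Reap completed TX descriptors up to the hardware DMA index (or all of them
 * when flushing), returning txwi buffers to the cache.
 */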
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
        struct mt76_queue_entry entry;
        int last;

        if (!q)
                return;

        spin_lock_bh(&q->cleanup_lock);
        if (flush)
                last = -1;
        else
                last = readl(&q->regs->dma_idx);

        while (q->queued > 0 && q->tail != last) {
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                mt76_queue_tx_complete(dev, q, &entry);

                if (entry.txwi) {
                        if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
                                mt76_put_txwi(dev, entry.txwi);
                }

                if (!flush && q->tail == last)
                        last = readl(&q->regs->dma_idx);
        }
        spin_unlock_bh(&q->cleanup_lock);

        if (flush) {
                spin_lock_bh(&q->lock);
                mt76_dma_sync_idx(dev, q);
                mt76_dma_kick_queue(dev, q);
                spin_unlock_bh(&q->lock);
        }

        if (!q->queued)
                wake_up(&dev->tx_wait);
}

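/*
 * Detach the RX buffer from a descriptor, unmap it and report its length and
 * whether more fragments follow.
 */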
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                 int *len, u32 *info, bool *more)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        struct mt76_desc *desc = &q->desc[idx];
        dma_addr_t buf_addr;
        void *buf = e->buf;
        int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

        buf_addr = e->dma_addr[0];
        if (len) {
                u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
                *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
                *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
        }

        if (info)
                *info = le32_to_cpu(desc->info);

        dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
        e->buf = NULL;

        return buf;
}

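/*
 * Pop the buffer at the queue tail if the hardware has marked the descriptor
 * as done (or unconditionally when flushing).
 */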
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
                 int *len, u32 *info, bool *more)
{
        int idx = q->tail;

        *more = false;
        if (!q->queued)
                return NULL;

        if (flush)
                q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
        else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
                return NULL;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;

        return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

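/* Queue an skb as a single buffer without a txwi (used for MCU message queues). */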
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
                          struct sk_buff *skb, u32 tx_info)
{
        struct mt76_queue_buf buf = {};
        dma_addr_t addr;

        if (q->queued + 1 >= q->ndesc - 1)
                goto error;

        addr = dma_map_single(dev->dev, skb->data, skb->len,
                              DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dev, addr)))
                goto error;

        buf.addr = addr;
        buf.len = skb->len;

        spin_lock_bh(&q->lock);
        mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
        mt76_dma_kick_queue(dev, q);
        spin_unlock_bh(&q->lock);

        return 0;

error:
        dev_kfree_skb(skb);
        return -ENOMEM;
}

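/*
 * Map a data frame (txwi + linear head + fragments), let the driver fill the
 * txwi via tx_prepare_skb() and add the resulting buffers to the TX ring.
 */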
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                      struct sk_buff *skb, struct mt76_wcid *wcid,
                      struct ieee80211_sta *sta)
{
        struct ieee80211_tx_status status = {
                .sta = sta,
        };
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
        struct ieee80211_hw *hw;
        int len, n = 0, ret = -ENOMEM;
        struct mt76_txwi_cache *t;
        struct sk_buff *iter;
        dma_addr_t addr;
        u8 *txwi;

        t = mt76_get_txwi(dev);
        if (!t)
                goto free_skb;

        txwi = mt76_get_txwi_ptr(dev, t);

        skb->prev = skb->next = NULL;
        if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
                mt76_insert_hdr_pad(skb);

        len = skb_headlen(skb);
        addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dev, addr)))
                goto free;

        tx_info.buf[n].addr = t->dma_addr;
        tx_info.buf[n++].len = dev->drv->txwi_size;
        tx_info.buf[n].addr = addr;
        tx_info.buf[n++].len = len;

        skb_walk_frags(skb, iter) {
                if (n == ARRAY_SIZE(tx_info.buf))
                        goto unmap;

                addr = dma_map_single(dev->dev, iter->data, iter->len,
                                      DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev->dev, addr)))
                        goto unmap;

                tx_info.buf[n].addr = addr;
                tx_info.buf[n++].len = iter->len;
        }
        tx_info.nbuf = n;

        if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
                ret = -ENOMEM;
                goto unmap;
        }

        dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                DMA_TO_DEVICE);
        ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
        dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                   DMA_TO_DEVICE);
        if (ret < 0)
                goto unmap;

        return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
                                tx_info.info, tx_info.skb, t);

unmap:
        for (n--; n > 0; n--)
                dma_unmap_single(dev->dev, tx_info.buf[n].addr,
                                 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
        /* fix tx_done accounting on queue overflow */
        if (mt76_is_testmode_skb(dev, skb, &hw)) {
                struct mt76_phy *phy = hw->priv;

                if (tx_info.skb == phy->test.tx_skb)
                        phy->test.tx_done--;
        }
#endif

        mt76_put_txwi(dev, t);

free_skb:
        status.skb = tx_info.skb;
        hw = mt76_tx_status_get_hw(dev, tx_info.skb);
        ieee80211_tx_status_ext(hw, &status);

        return ret;
}

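/*
 * Refill the RX ring with page-fragment buffers until it is full or
 * allocation fails.
 */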
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
        dma_addr_t addr;
        void *buf;
        int frames = 0;
        int len = SKB_WITH_OVERHEAD(q->buf_size);
        int offset = q->buf_offset;

        spin_lock_bh(&q->lock);

        while (q->queued < q->ndesc - 1) {
                struct mt76_queue_buf qbuf;

                buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
                if (!buf)
                        break;

                addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dev->dev, addr))) {
                        skb_free_frag(buf);
                        break;
                }

                qbuf.addr = addr + offset;
                qbuf.len = len - offset;
                mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
                frames++;
        }

        if (frames)
                mt76_dma_kick_queue(dev, q);

        spin_unlock_bh(&q->lock);

        return frames;
}

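/* Drop all buffers still on the RX ring and release the page-fragment cache. */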
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct page *page;
        void *buf;
        bool more;

        spin_lock_bh(&q->lock);
        do {
                buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
                if (!buf)
                        break;

                skb_free_frag(buf);
        } while (1);
        spin_unlock_bh(&q->lock);

        if (!q->rx_page.va)
                return;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
        struct mt76_queue *q = &dev->q_rx[qid];
        int i;

        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        mt76_dma_rx_cleanup(dev, q);
        mt76_dma_sync_idx(dev, q);
        mt76_dma_rx_fill(dev, q);

        if (!q->rx_head)
                return;

        dev_kfree_skb(q->rx_head);
        q->rx_head = NULL;
}

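/*
 * Attach an RX buffer as a page fragment to the pending rx_head skb; deliver
 * the skb once the last fragment has arrived.
 */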
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
                  int len, bool more)
{
        struct sk_buff *skb = q->rx_head;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;

        if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
                struct page *page = virt_to_head_page(data);
                int offset = data - page_address(page) + q->buf_offset;

                skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
        } else {
                skb_free_frag(data);
        }

        if (more)
                return;

        q->rx_head = NULL;
        if (nr_frags < ARRAY_SIZE(shinfo->frags))
                dev->drv->rx_skb(dev, q - dev->q_rx, skb);
        else
                dev_kfree_skb(skb);
}

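/*
 * Process up to @budget completed RX descriptors: build skbs, reassemble
 * multi-buffer frames, pass them to the driver and refill the ring.
 */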
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
        int len, data_len, done = 0;
        struct sk_buff *skb;
        unsigned char *data;
        bool more;

        while (done < budget) {
                u32 info;

                data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
                if (!data)
                        break;

                if (q->rx_head)
                        data_len = q->buf_size;
                else
                        data_len = SKB_WITH_OVERHEAD(q->buf_size);

                if (data_len < len + q->buf_offset) {
                        dev_kfree_skb(q->rx_head);
                        q->rx_head = NULL;

                        skb_free_frag(data);
                        continue;
                }

                if (q->rx_head) {
                        mt76_add_fragment(dev, q, data, len, more);
                        continue;
                }

                skb = build_skb(data, q->buf_size);
                if (!skb) {
                        skb_free_frag(data);
                        continue;
                }
                skb_reserve(skb, q->buf_offset);

                if (q == &dev->q_rx[MT_RXQ_MCU]) {
                        u32 *rxfce = (u32 *)skb->cb;
                        *rxfce = info;
                }

                __skb_put(skb, len);
                done++;

                if (more) {
                        q->rx_head = skb;
                        continue;
                }

                dev->drv->rx_skb(dev, q - dev->q_rx, skb);
        }

        mt76_dma_rx_fill(dev, q);
        return done;
}

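/* NAPI poll callback for RX queues. */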
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
        struct mt76_dev *dev;
        int qid, done = 0, cur;

        dev = container_of(napi->dev, struct mt76_dev, napi_dev);
        qid = napi - dev->napi;

        rcu_read_lock();

        do {
                cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
                mt76_rx_poll_complete(dev, qid, napi);
                done += cur;
        } while (cur && done < budget);

        rcu_read_unlock();

        if (done < budget && napi_complete(napi))
                dev->drv->rx_poll_complete(dev, qid);

        return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

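/*
 * Set up the dummy NAPI netdevs, register the RX NAPI contexts and fill the
 * RX rings.
 */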
static int
mt76_dma_init(struct mt76_dev *dev,
              int (*poll)(struct napi_struct *napi, int budget))
{
        int i;

        init_dummy_netdev(&dev->napi_dev);
        init_dummy_netdev(&dev->tx_napi_dev);
        snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
                 wiphy_name(dev->hw->wiphy));
        dev->napi_dev.threaded = 1;

        mt76_for_each_q_rx(dev, i) {
                netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
                mt76_dma_rx_fill(dev, &dev->q_rx[i]);
                napi_enable(&dev->napi[i]);
        }

        return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
        .init = mt76_dma_init,
        .alloc = mt76_dma_alloc_queue,
        .reset_q = mt76_dma_queue_reset,
        .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
        .tx_queue_skb = mt76_dma_tx_queue_skb,
        .tx_cleanup = mt76_dma_tx_cleanup,
        .rx_cleanup = mt76_dma_rx_cleanup,
        .rx_reset = mt76_dma_rx_reset,
        .kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
        dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

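/* Tear down all TX/RX queues and free any txwi buffers still in the cache. */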
void mt76_dma_cleanup(struct mt76_dev *dev)
{
        int i;

        mt76_worker_disable(&dev->tx_worker);
        netif_napi_del(&dev->tx_napi);

        for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
                mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
                if (dev->phy2)
                        mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
        }

        for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
                mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

        mt76_for_each_q_rx(dev, i) {
                netif_napi_del(&dev->napi[i]);
                mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
        }

        mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);