linux/drivers/net/wireless/mediatek/mt76/tx.c
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"

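/*
 * Allocate a TXWI (TX descriptor) buffer together with its tracking
 * structure in a single cache-line aligned allocation, and DMA-map the
 * descriptor area for device reads.
 */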
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

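/*
 * TXWI buffers are kept on a free list (dev->txwi_cache) so they can be
 * reused instead of being reallocated and remapped for every frame.
 */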
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock_bh(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock_bh(&dev->lock);

        return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void mt76_tx_free(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

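/*
 * Track the sequence number following this QoS data frame so that a block
 * ack request (BAR) with the correct starting sequence number can be sent
 * later (see mt76_txq_schedule_list()).
 */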
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

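/*
 * TX status reporting: completed frames are collected on a local list while
 * status_list.lock is held and are only handed to mac80211 after the lock
 * has been dropped in mt76_tx_status_unlock().
 */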
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
                   __acquires(&dev->status_list.lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_list.lock);
        __acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
                      __releases(&dev->status_list.lock)
{
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_list.lock);
        __release(&dev->status_list.lock);

        while ((skb = __skb_dequeue(list)) != NULL)
                ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

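/*
 * A frame is reported to mac80211 only once both the DMA completion and the
 * TX status event have been seen. If the status event failed or timed out,
 * the frame is conservatively marked as ACKed.
 */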
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;

        __skb_unlink(skb, &dev->status_list);

        /* Tx status can be unreliable. If it fails, mark the frame as ACKed. */
        if (flags & MT_TX_CB_TXS_FAILED) {
                ieee80211_tx_info_clear_status(info);
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

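/*
 * Assign a packet ID for TX status tracking and queue the frame on
 * status_list. Returns MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB when no
 * status tracking is needed for this frame.
 */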
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        if (!wcid)
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
                return MT_PACKET_ID_NO_SKB;

        spin_lock_bh(&dev->status_list.lock);

        memset(cb, 0, sizeof(*cb));
        wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
            wcid->packet_id == MT_PACKET_ID_NO_SKB)
                wcid->packet_id = MT_PACKET_ID_FIRST;

        pid = wcid->packet_id;
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;

        __skb_queue_tail(&dev->status_list, skb);
        spin_unlock_bh(&dev->status_list.lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

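/*
 * Look up a queued status frame by wcid and packet ID. While walking the
 * list, entries older than MT_TX_STATUS_SKB_TIMEOUT are completed as failed.
 * Passing a negative pktid flushes all matching entries.
 */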
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&dev->status_list, skb, tmp) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (wcid && cb->wcid != wcid->idx)
                        continue;

                if (cb->pktid == pktid)
                        return skb;

                if (pktid >= 0 &&
                    !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
                        continue;

                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

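/*
 * DMA completion handler for transmitted frames. Frames that were never
 * linked into status_list (skb->prev == NULL) are freed immediately; the
 * rest are completed once their TX status has also arrived.
 */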
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
        struct sk_buff_head list;

        if (!skb->prev) {
                ieee80211_free_txskb(dev->hw, skb);
                return;
        }

        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

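/*
 * Transmit entry point for frames handed over directly by mac80211: fill in
 * rate info if the driver has not fixed it, record the aggregation sequence
 * number, queue the frame to the hardware queue and stop the mac80211 queue
 * when the ring is close to full.
 */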
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);

        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
        }

        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
                struct ieee80211_txq *txq;
                struct mt76_txq *mtxq;
                u8 tid;

                tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
                txq = sta->txq[tid];
                mtxq = (struct mt76_txq *) txq->drv_priv;

                if (mtxq->aggr)
                        mt76_check_agg_ssn(mtxq, skb);
        }

        q = dev->q_tx[qid].q;

        spin_lock_bh(&q->lock);
        dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);

        if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
                q->stopped = true;
        }

        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

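/*
 * Dequeue the next frame for a TXQ, preferring the driver's retry queue over
 * frames pulled from mac80211. In the PS path, clear the buffered indication
 * once the retry queue has been drained.
 */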
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct sk_buff *skb;

        skb = skb_dequeue(&mtxq->retry_q);
        if (skb) {
                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

                if (ps && skb_queue_empty(&mtxq->retry_q))
                        ieee80211_sta_set_buffered(txq->sta, tid, false);

                return skb;
        }

        skb = ieee80211_tx_dequeue(dev->hw, txq);
        if (!skb)
                return NULL;

        return skb;
}

static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

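/*
 * Release frames buffered for a station in power save. All frames go out on
 * the PSD queue; the last one is marked EOSP and requests TX status so the
 * service period can be closed. If nothing was buffered, signal EOSP
 * immediately.
 */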
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_dev *dev = hw->priv;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
                struct sk_buff *skb;

                if (!(tids & 1))
                        continue;

                do {
                        skb = mt76_txq_dequeue(dev, mtxq, true);
                        if (!skb)
                                break;

                        if (mtxq->aggr)
                                mt76_check_agg_ssn(mtxq, skb);

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(dev, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

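/*
 * Pull a burst of frames from a single TXQ and queue them to hardware.
 * A-MPDU capable traffic gets a longer burst (16 vs 3 frames); the burst
 * ends early on rate probing frames or when the A-MPDU state changes, and
 * all frames in the burst reuse the rate selected for the first one.
 */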
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
                    struct mt76_txq *mtxq, bool *empty)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct mt76_wcid *wcid = mtxq->wcid;
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1, limit;
        struct ieee80211_tx_rate tx_rate;
        bool ampdu;
        bool probe;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
                *empty = true;
                return 0;
        }

        skb = mt76_txq_dequeue(dev, mtxq, false);
        if (!skb) {
                *empty = true;
                return 0;
        }

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);
        tx_rate = info->control.rates[0];

        probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
        ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
        limit = ampdu ? 16 : 3;

        if (ampdu)
                mt76_check_agg_ssn(mtxq, skb);

        idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);

        if (idx < 0)
                return idx;

        do {
                bool cur_ampdu;

                if (probe)
                        break;

                if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
                    test_bit(MT76_RESET, &dev->state))
                        return -EBUSY;

                skb = mt76_txq_dequeue(dev, mtxq, false);
                if (!skb) {
                        *empty = true;
                        break;
                }

                info = IEEE80211_SKB_CB(skb);
                cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

                if (ampdu != cur_ampdu ||
                    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                        skb_queue_tail(&mtxq->retry_q, skb);
                        break;
                }

                info->control.rates[0] = tx_rate;

                if (cur_ampdu)
                        mt76_check_agg_ssn(mtxq, skb);

                idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
                                                   txq->sta);
                if (idx < 0)
                        return idx;

                n_frames++;
        } while (n_frames < limit);

        if (!probe) {
                hwq->entry[idx].qid = sq - dev->q_tx;
                hwq->entry[idx].schedule = true;
                sq->swq_queued++;
        }

        dev->queue_ops->kick(dev, hwq);

        return n_frames;
}

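/*
 * Round-robin over the TXQs that mac80211 schedules for this hardware
 * queue, sending a burst from each. Pending BARs are sent with the queue
 * lock dropped, and scheduling stops once enough bursts are in flight.
 */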
static int
mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        spin_lock_bh(&hwq->lock);
        while (1) {
                bool empty = false;

                if (sq->swq_queued >= 4)
                        break;

                if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
                    test_bit(MT76_RESET, &dev->state)) {
                        ret = -EBUSY;
                        break;
                }

                txq = ieee80211_next_txq(dev->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = mtxq->wcid;
                if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        spin_unlock_bh(&hwq->lock);
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                        spin_lock_bh(&hwq->lock);
                }

                ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
                if (skb_queue_empty(&mtxq->retry_q))
                        empty = true;
                ieee80211_return_txq(dev->hw, txq, !empty);
        }
        spin_unlock_bh(&hwq->lock);

        return ret;
}

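/*
 * Schedule transmissions on one hardware queue, looping until no more
 * frames could be queued. Only the four WMM data queues are scheduled here.
 */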
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        int len;

        if (qid >= 4)
                return;

        if (sq->swq_queued >= 4)
                return;

        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(dev->hw, qid);
                len = mt76_txq_schedule_list(dev, qid);
                ieee80211_txq_schedule_end(dev->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
        int i;

        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

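/*
 * Mark a station's TXQs so that, for aggregation sessions, a BAR is sent the
 * next time the queue is scheduled (see mt76_txq_schedule_list()).
 */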
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                hwq = mtxq->swq->q;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_dev *dev = hw->priv;

        if (!test_bit(MT76_STATE_RUNNING, &dev->state))
                return;

        tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq;
        struct sk_buff *skb;

        if (!txq)
                return;

        mtxq = (struct mt76_txq *) txq->drv_priv;

        while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
                ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

        skb_queue_head_init(&mtxq->retry_q);

        mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

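/*
 * Map a mac80211 access category to the hardware WMM queue index
 * (BE=0, BK=1, VI=2, VO=3).
 */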
u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);